// CallFrameShuffler.h
#pragma once
#if ENABLE(JIT)
#include "CachedRecovery.h"
#include "CallFrameShuffleData.h"
#include "MacroAssembler.h"
#include "RegisterSet.h"
#include <wtf/Vector.h>
namespace JSC {
// Emits the register/stack moves needed to turn the current ("old") call
// frame layout into a target ("new") layout described by a
// CallFrameShuffleData — used when preparing tail calls and slow-path calls.
// Each slot/register's pending value is tracked as a CachedRecovery; the
// shuffler resolves them while avoiding clobbering values that still need to
// be read (the "danger zone", see updateDangerFrontier()).
class CallFrameShuffler {
    WTF_MAKE_FAST_ALLOCATED;
public:
    CallFrameShuffler(CCallHelpers&, const CallFrameShuffleData&);

    void dump(PrintStream&) const;

    // Marks gpr as off-limits to the shuffler until releaseGPR() is called.
    // Must not already be locked.
    void lockGPR(GPRReg gpr)
    {
        ASSERT(!m_lockedRegisters.get(gpr));
        m_lockedRegisters.set(gpr);
        if (verbose)
            dataLog(" * Locking ", gpr, "\n");
    }

    // Frees up a GPR (spilling if necessary), locks it, and returns it.
    GPRReg acquireGPR()
    {
        ensureGPR();
        GPRReg gpr { getFreeGPR() };
        ASSERT(!m_registers[gpr]);
        lockGPR(gpr);
        return gpr;
    }

    // Unlocks gpr. Tolerates (and logs, when verbose) an unlocked register.
    void releaseGPR(GPRReg gpr)
    {
        if (verbose) {
            if (m_lockedRegisters.get(gpr))
                dataLog(" * Releasing ", gpr, "\n");
            else
                dataLog(" * ", gpr, " was not locked\n");
        }
        m_lockedRegisters.clear(gpr);
    }

    // Materializes the value destined for gpr (if any) into gpr now.
    void restoreGPR(GPRReg gpr)
    {
        if (!m_newRegisters[gpr])
            return;

        ensureGPR();
#if USE(JSVALUE32_64)
        // On 32-bit, displacing may need a second scratch GPR (tag + payload),
        // so make sure two are free before emitting the displace.
        GPRReg tempGPR { getFreeGPR() };
        lockGPR(tempGPR);
        ensureGPR();
        releaseGPR(tempGPR);
#endif
        emitDisplace(*m_newRegisters[gpr]);
    }

    // Captures the current (still-undecided) shuffle as data that can be
    // replayed later. Only supported on 64-bit for register-targeted
    // recoveries (the 32-bit path asserts unreachable).
    CallFrameShuffleData snapshot() const
    {
        ASSERT(isUndecided());

        CallFrameShuffleData data;
        data.numLocals = numLocals();
        data.numPassedArgs = m_numPassedArgs;
        data.callee = getNew(VirtualRegister { CallFrameSlot::callee })->recovery();
        data.args.resize(argCount());
        for (size_t i = 0; i < argCount(); ++i)
            data.args[i] = getNew(virtualRegisterForArgument(i))->recovery();
        for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
            CachedRecovery* cachedRecovery { m_newRegisters[reg] };
            if (!cachedRecovery)
                continue;
#if USE(JSVALUE64)
            data.registers[reg] = cachedRecovery->recovery();
#else
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
        return data;
    }

    // Requests that the callee value also end up in jsValueRegs.
    void setCalleeJSValueRegs(JSValueRegs jsValueRegs)
    {
        ASSERT(isUndecided());
        ASSERT(!getNew(jsValueRegs));
        CachedRecovery* cachedRecovery { getNew(VirtualRegister(CallFrameSlot::callee)) };
        ASSERT(cachedRecovery);
        addNew(jsValueRegs, cachedRecovery->recovery());
    }

    // On 32-bit, narrows the callee's recovery to its cell (payload) form so
    // the tag word need not be moved. No-op on 64-bit.
    void assumeCalleeIsCell()
    {
#if USE(JSVALUE32_64)
        CachedRecovery& calleeCachedRecovery = *getNew(VirtualRegister(CallFrameSlot::callee));
        switch (calleeCachedRecovery.recovery().technique()) {
        case InPair:
            updateRecovery(
                calleeCachedRecovery,
                ValueRecovery::inGPR(
                    calleeCachedRecovery.recovery().payloadGPR(),
                    DataFormatCell));
            break;
        case DisplacedInJSStack:
            updateRecovery(
                calleeCachedRecovery,
                ValueRecovery::displacedInJSStack(
                    calleeCachedRecovery.recovery().virtualRegister(),
                    DataFormatCell));
            break;
        case InFPR:
        case UnboxedCellInGPR:
        case CellDisplacedInJSStack:
            // Already in (or recoverable as) cell form.
            break;
        case Constant:
            ASSERT(calleeCachedRecovery.recovery().constant().isCell());
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }

    void prepareForTailCall();
    void prepareForSlowPath();

private:
    static const bool verbose = false;

    CCallHelpers& m_jit;

    void prepareAny();

    void spill(CachedRecovery&);

    // Boxing helpers: emitBox rewrites a recovery into boxed JSValue form;
    // canBox/ensureBox check for / obtain the scratch registers boxing needs.
    void emitBox(CachedRecovery&);

    bool canBox(CachedRecovery& cachedRecovery)
    {
        if (cachedRecovery.boxingRequiresGPR() && getFreeGPR() == InvalidGPRReg)
            return false;

        if (cachedRecovery.boxingRequiresFPR() && getFreeFPR() == InvalidFPRReg)
            return false;

        return true;
    }

    void ensureBox(CachedRecovery& cachedRecovery)
    {
        if (canBox(cachedRecovery))
            return;

        if (cachedRecovery.boxingRequiresGPR())
            ensureGPR();

        if (cachedRecovery.boxingRequiresFPR())
            ensureFPR();
    }

    // Loading helpers: emitLoad loads a stack-resident recovery into
    // register(s); canLoad/ensureLoad check for / obtain those registers.
    void emitLoad(CachedRecovery&);

    bool canLoad(CachedRecovery&);

    void ensureLoad(CachedRecovery& cachedRecovery)
    {
        if (canLoad(cachedRecovery))
            return;

        ASSERT(cachedRecovery.loadsIntoGPR() || cachedRecovery.loadsIntoFPR());

        if (cachedRecovery.loadsIntoFPR()) {
            if (cachedRecovery.loadsIntoGPR())
                ensureRegister();
            else
                ensureFPR();
        } else
            ensureGPR();
    }

    bool canLoadAndBox(CachedRecovery& cachedRecovery)
    {
        // We don't have interfering loads & boxes (the asserts document
        // this invariant), so checking the two independently is sufficient.
        ASSERT(!cachedRecovery.loadsIntoFPR() || !cachedRecovery.boxingRequiresFPR());
        ASSERT(!cachedRecovery.loadsIntoGPR() || !cachedRecovery.boxingRequiresGPR());

        return canLoad(cachedRecovery) && canBox(cachedRecovery);
    }

    DataFormat emitStore(CachedRecovery&, MacroAssembler::Address);

    void emitDisplace(CachedRecovery&);

    void emitDeltaCheck();

    Bag<CachedRecovery> m_cachedRecoveries;

    // Rebinds cachedRecovery to a new recovery, keeping the
    // recovery -> CachedRecovery index in sync.
    void updateRecovery(CachedRecovery& cachedRecovery, ValueRecovery recovery)
    {
        clearCachedRecovery(cachedRecovery.recovery());
        cachedRecovery.setRecovery(recovery);
        setCachedRecovery(recovery, &cachedRecovery);
    }

    CachedRecovery* getCachedRecovery(ValueRecovery);

    CachedRecovery* setCachedRecovery(ValueRecovery, CachedRecovery*);

    void clearCachedRecovery(ValueRecovery recovery)
    {
        // Constants are not indexed (see addCachedRecovery), so there is
        // nothing to clear for them.
        if (!recovery.isConstant())
            setCachedRecovery(recovery, nullptr);
    }

    // Returns the CachedRecovery for recovery, creating one if needed.
    // Non-constant recoveries are deduplicated through the index.
    CachedRecovery* addCachedRecovery(ValueRecovery recovery)
    {
        if (recovery.isConstant())
            return m_cachedRecoveries.add(recovery);
        CachedRecovery* cachedRecovery = getCachedRecovery(recovery);
        if (!cachedRecovery)
            return setCachedRecovery(recovery, m_cachedRecoveries.add(recovery));
        return cachedRecovery;
    }

    // Old (source) frame view. Slots are stored from lastOld() down to
    // firstOld(), hence the reversed index arithmetic in getOld()/setOld().
    Vector<CachedRecovery*> m_oldFrame;

    int numLocals() const
    {
        return m_oldFrame.size() - CallerFrameAndPC::sizeInRegisters;
    }

    CachedRecovery* getOld(VirtualRegister reg) const
    {
        return m_oldFrame[CallerFrameAndPC::sizeInRegisters - reg.offset() - 1];
    }

    void setOld(VirtualRegister reg, CachedRecovery* cachedRecovery)
    {
        m_oldFrame[CallerFrameAndPC::sizeInRegisters - reg.offset() - 1] = cachedRecovery;
    }

    VirtualRegister firstOld() const
    {
        return VirtualRegister { static_cast<int>(-numLocals()) };
    }

    VirtualRegister lastOld() const
    {
        return VirtualRegister { CallerFrameAndPC::sizeInRegisters - 1 };
    }

    bool isValidOld(VirtualRegister reg) const
    {
        return reg >= firstOld() && reg <= lastOld();
    }

    bool m_didExtendFrame { false };

    void extendFrameIfNeeded();

    // New (target) frame view, indexed directly by virtual register offset.
    Vector<CachedRecovery*> m_newFrame;

    size_t argCount() const
    {
        return m_newFrame.size() - CallFrame::headerSizeInRegisters;
    }

    CachedRecovery* getNew(VirtualRegister newRegister) const
    {
        return m_newFrame[newRegister.offset()];
    }

    void setNew(VirtualRegister newRegister, CachedRecovery* cachedRecovery)
    {
        m_newFrame[newRegister.offset()] = cachedRecovery;
    }

    void addNew(VirtualRegister newRegister, ValueRecovery recovery)
    {
        CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
        cachedRecovery->addTarget(newRegister);
        setNew(newRegister, cachedRecovery);
    }

    VirtualRegister firstNew() const
    {
        return VirtualRegister { 0 };
    }

    VirtualRegister lastNew() const
    {
        return VirtualRegister { static_cast<int>(m_newFrame.size()) - 1 };
    }

    bool isValidNew(VirtualRegister reg) const
    {
        return reg >= firstNew() && reg <= lastNew();
    }

    int m_alignedOldFrameSize;
    int m_alignedNewFrameSize;

    // Delta between the old and new frame bases, in slots; maps a new-frame
    // virtual register onto the old frame's coordinates.
    int m_frameDelta;

    VirtualRegister newAsOld(VirtualRegister reg) const
    {
        return reg - m_frameDelta;
    }

    // Registers the shuffler must not touch right now. Mutable because
    // getFreeRegister() (const) may hand the tag register back.
    mutable RegisterSet m_lockedRegisters;

    // What each register currently holds, if anything.
    RegisterMap<CachedRecovery*> m_registers;

#if USE(JSVALUE64)
    // Cached tag register; InvalidGPRReg once surrendered as a scratch.
    mutable GPRReg m_tagTypeNumber;

    bool tryAcquireTagTypeNumber();
#endif

    // What each register should hold at the end of the shuffle, if anything.
    RegisterMap<CachedRecovery*> m_newRegisters;

    // Finds an unlocked register accepted by check. Prefers registers with no
    // final (new-frame) role; failing that, returns an empty register that
    // does have one; as a last resort on 64-bit, gives up the tag register.
    template<typename CheckFunctor>
    Reg getFreeRegister(const CheckFunctor& check) const
    {
        Reg nonTemp { };
        for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
            if (m_lockedRegisters.get(reg))
                continue;

            if (!check(reg))
                continue;

            if (!m_registers[reg]) {
                if (!m_newRegisters[reg])
                    return reg;
                if (!nonTemp)
                    nonTemp = reg;
            }
        }

#if USE(JSVALUE64)
        if (!nonTemp && m_tagTypeNumber != InvalidGPRReg && check(Reg { m_tagTypeNumber })) {
            ASSERT(m_lockedRegisters.get(m_tagTypeNumber));
            m_lockedRegisters.clear(m_tagTypeNumber);
            nonTemp = Reg { m_tagTypeNumber };
            m_tagTypeNumber = InvalidGPRReg;
        }
#endif
        return nonTemp;
    }

    // A GPR that is free AND has no new-frame role (safe as a pure scratch).
    GPRReg getFreeTempGPR() const
    {
        Reg freeTempGPR { getFreeRegister([this] (Reg reg) { return reg.isGPR() && !m_newRegisters[reg]; }) };
        if (!freeTempGPR)
            return InvalidGPRReg;

        return freeTempGPR.gpr();
    }

    GPRReg getFreeGPR() const
    {
        Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) };
        if (!freeGPR)
            return InvalidGPRReg;

        return freeGPR.gpr();
    }

    FPRReg getFreeFPR() const
    {
        Reg freeFPR { getFreeRegister([] (Reg reg) { return reg.isFPR(); }) };
        if (!freeFPR)
            return InvalidFPRReg;

        return freeFPR.fpr();
    }

    bool hasFreeRegister() const
    {
        return static_cast<bool>(getFreeRegister([] (Reg) { return true; }));
    }

    // Spills some cached recovery accepted by check, preferring ones with a
    // final register target, then anything in the new frame. Aborts if no
    // candidate exists.
    template<typename CheckFunctor>
    void ensureRegister(const CheckFunctor& check)
    {
        // We prefer spilling registers that have a new-frame role, since
        // evicting them frees a register without losing progress.
        for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
            if (m_lockedRegisters.get(reg))
                continue;

            CachedRecovery* cachedRecovery { m_newRegisters[reg] };
            if (!cachedRecovery)
                continue;

            if (check(*cachedRecovery)) {
                if (verbose)
                    dataLog(" ", cachedRecovery->recovery(), " looks like a good spill candidate\n");
                spill(*cachedRecovery);
                return;
            }
        }

        // Fall back to spilling anything destined for the new frame.
        for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
            CachedRecovery* cachedRecovery { getNew(reg) };
            if (!cachedRecovery)
                continue;

            if (check(*cachedRecovery)) {
                spill(*cachedRecovery);
                return;
            }
        }

        RELEASE_ASSERT_NOT_REACHED();
    }

    // Guarantees at least one free register of any kind.
    void ensureRegister()
    {
        if (hasFreeRegister())
            return;

        if (verbose)
            dataLog(" Finding a register to spill\n");
        ensureRegister(
            [this] (const CachedRecovery& cachedRecovery) {
                if (cachedRecovery.recovery().isInGPR())
                    return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
                if (cachedRecovery.recovery().isInFPR())
                    return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
#if USE(JSVALUE32_64)
                if (cachedRecovery.recovery().technique() == InPair) {
                    return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
                        && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
                }
#endif
                return false;
            });
    }

    // Guarantees at least one free temp GPR (one with no new-frame role).
    void ensureTempGPR()
    {
        if (getFreeTempGPR() != InvalidGPRReg)
            return;

        if (verbose)
            dataLog(" Finding a temp GPR to spill\n");
        ensureRegister(
            [this] (const CachedRecovery& cachedRecovery) {
                if (cachedRecovery.recovery().isInGPR()) {
                    return !m_lockedRegisters.get(cachedRecovery.recovery().gpr())
                        && !m_newRegisters[cachedRecovery.recovery().gpr()];
                }
#if USE(JSVALUE32_64)
                if (cachedRecovery.recovery().technique() == InPair) {
                    return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
                        && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR())
                        && !m_newRegisters[cachedRecovery.recovery().tagGPR()]
                        && !m_newRegisters[cachedRecovery.recovery().payloadGPR()];
                }
#endif
                return false;
            });
    }

    // Guarantees at least one free GPR.
    void ensureGPR()
    {
        if (getFreeGPR() != InvalidGPRReg)
            return;

        if (verbose)
            dataLog(" Finding a GPR to spill\n");
        ensureRegister(
            [this] (const CachedRecovery& cachedRecovery) {
                if (cachedRecovery.recovery().isInGPR())
                    return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
#if USE(JSVALUE32_64)
                if (cachedRecovery.recovery().technique() == InPair) {
                    return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
                        && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
                }
#endif
                return false;
            });
    }

    // Guarantees at least one free FPR.
    void ensureFPR()
    {
        if (getFreeFPR() != InvalidFPRReg)
            return;

        if (verbose)
            dataLog(" Finding an FPR to spill\n");
        ensureRegister(
            [this] (const CachedRecovery& cachedRecovery) {
                if (cachedRecovery.recovery().isInFPR())
                    return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
                return false;
            });
    }

    CachedRecovery* getNew(JSValueRegs jsValueRegs) const
    {
#if USE(JSVALUE64)
        return m_newRegisters[jsValueRegs.gpr()];
#else
        // Both halves of a pair must agree on the recovery they map to.
        ASSERT(
            jsValueRegs.tagGPR() == InvalidGPRReg || jsValueRegs.payloadGPR() == InvalidGPRReg
            || m_newRegisters[jsValueRegs.payloadGPR()] == m_newRegisters[jsValueRegs.tagGPR()]);
        if (jsValueRegs.payloadGPR() == InvalidGPRReg)
            return m_newRegisters[jsValueRegs.tagGPR()];
        return m_newRegisters[jsValueRegs.payloadGPR()];
#endif
    }

    // Retargets recovery's cached value onto jsValueRegs, displacing any
    // previously-wanted registers for the same recovery.
    void addNew(JSValueRegs jsValueRegs, ValueRecovery recovery)
    {
        ASSERT(jsValueRegs && !getNew(jsValueRegs));
        CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
#if USE(JSVALUE64)
        if (cachedRecovery->wantedJSValueRegs())
            m_newRegisters[cachedRecovery->wantedJSValueRegs().gpr()] = nullptr;
        m_newRegisters[jsValueRegs.gpr()] = cachedRecovery;
#else
        if (JSValueRegs oldRegs { cachedRecovery->wantedJSValueRegs() }) {
            if (oldRegs.payloadGPR())
                m_newRegisters[oldRegs.payloadGPR()] = nullptr;
            if (oldRegs.tagGPR())
                m_newRegisters[oldRegs.tagGPR()] = nullptr;
        }
        if (jsValueRegs.payloadGPR() != InvalidGPRReg)
            m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery;
        if (jsValueRegs.tagGPR() != InvalidGPRReg)
            m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery;
#endif
        ASSERT(!cachedRecovery->wantedJSValueRegs());
        cachedRecovery->setWantedJSValueRegs(jsValueRegs);
    }

    void addNew(FPRReg fpr, ValueRecovery recovery)
    {
        ASSERT(fpr != InvalidFPRReg && !m_newRegisters[fpr]);
        CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
        m_newRegisters[fpr] = cachedRecovery;
        ASSERT(cachedRecovery->wantedFPR() == InvalidFPRReg);
        cachedRecovery->setWantedFPR(fpr);
    }

    // Old-frame addressing: base register + slot offset.
    GPRReg m_oldFrameBase { MacroAssembler::framePointerRegister };
    int m_oldFrameOffset { 0 };

    MacroAssembler::Address addressForOld(VirtualRegister reg) const
    {
        return MacroAssembler::Address(m_oldFrameBase,
            (m_oldFrameOffset + reg.offset()) * sizeof(Register));
    }

    // New-frame addressing. InvalidGPRReg means the base has not been chosen
    // yet (the "undecided" state asserted throughout).
    GPRReg m_newFrameBase { InvalidGPRReg };
    int m_newFrameOffset { 0 };

    bool isUndecided() const
    {
        return m_newFrameBase == InvalidGPRReg;
    }

    bool isSlowPath() const
    {
        return m_newFrameBase == MacroAssembler::stackPointerRegister;
    }

    MacroAssembler::Address addressForNew(VirtualRegister reg) const
    {
        return MacroAssembler::Address(m_newFrameBase,
            (m_newFrameOffset + reg.offset()) * sizeof(Register));
    }

    // Highest new-frame slot whose write could clobber a still-needed old
    // slot; firstNew() - 1 when no such slot exists (the sentinel checked
    // below in updateDangerFrontier()).
    VirtualRegister m_dangerFrontier;

    VirtualRegister dangerFrontier() const
    {
        ASSERT(!isUndecided());

        return m_dangerFrontier;
    }

    bool isDangerNew(VirtualRegister reg) const
    {
        ASSERT(!isUndecided() && isValidNew(reg));
        return reg <= dangerFrontier();
    }

    // Rescans the new frame from the top for the highest slot that overlaps a
    // live old slot, and records it as the danger frontier.
    void updateDangerFrontier()
    {
        ASSERT(!isUndecided());

        m_dangerFrontier = firstNew() - 1;
        for (VirtualRegister reg = lastNew(); reg >= firstNew(); reg -= 1) {
            if (!getNew(reg) || !isValidOld(newAsOld(reg)) || !getOld(newAsOld(reg)))
                continue;

            m_dangerFrontier = reg;
            if (verbose)
                dataLog(" Danger frontier now at NEW ", m_dangerFrontier, "\n");
            break;
        }
        // Only claim the danger zone is empty when the scan found nothing;
        // the sentinel (firstNew() - 1) is still in place in that case.
        if (verbose && m_dangerFrontier < firstNew())
            dataLog(" All clear! Danger zone is empty.\n");
    }

    // True if none of cachedRecovery's targets fall inside the danger zone.
    bool hasOnlySafeWrites(CachedRecovery& cachedRecovery) const
    {
        for (VirtualRegister target : cachedRecovery.targets()) {
            if (isDangerNew(target))
                return false;
        }
        return true;
    }

    bool tryWrites(CachedRecovery&);

    bool performSafeWrites();

    unsigned m_numPassedArgs { UINT_MAX };
};
}
#endif // ENABLE(JIT)