// CallFrameShuffler.cpp
#include "config.h"
#include "CallFrameShuffler.h"
#if ENABLE(JIT)
#include "CachedRecovery.h"
#include "CCallHelpers.h"
#include "CodeBlock.h"
namespace JSC {
// Builds the shuffle plan: records every value that must be moved from the
// old (caller) frame into the new (callee) frame described by `data`, and
// computes the frame-size bookkeeping used by the shuffling algorithm.
CallFrameShuffler::CallFrameShuffler(CCallHelpers& jit, const CallFrameShuffleData& data)
    : m_jit(jit)
    // One tracking slot per old-frame local, plus the CallerFrameAndPC header.
    , m_oldFrame(data.numLocals + CallerFrameAndPC::sizeInRegisters, nullptr)
    // One tracking slot per new-frame argument, plus the full frame header.
    , m_newFrame(data.args.size() + CallFrame::headerSizeInRegisters, nullptr)
    // Frame sizes rounded up to keep the stack aligned.
    , m_alignedOldFrameSize(CallFrame::headerSizeInRegisters
        + roundArgumentCountToAlignFrame(jit.codeBlock()->numParameters()))
    , m_alignedNewFrameSize(CallFrame::headerSizeInRegisters
        + roundArgumentCountToAlignFrame(data.args.size()))
    // Distance (in slots) between the two frames' corresponding positions.
    , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize)
    // Start with every register locked; scratch registers are unlocked below.
    , m_lockedRegisters(RegisterSet::allRegisters())
    , m_numPassedArgs(data.numPassedArgs)
{
    // Unlock the numbered GPRs and FPRs so the shuffler may use them as
    // scratch; special-purpose registers remain locked.
    for (unsigned i = GPRInfo::numberOfRegisters; i--; )
        m_lockedRegisters.clear(GPRInfo::toRegister(i));
    for (unsigned i = FPRInfo::numberOfRegisters; i--; )
        m_lockedRegisters.clear(FPRInfo::toRegister(i));

    // Also allow use of the VM's callee-save registers.
    m_lockedRegisters.exclude(RegisterSet::vmCalleeSaveRegisters());

    // Record where the callee and each argument of the new frame come from.
    ASSERT(!data.callee.isInJSStack() || data.callee.virtualRegister().isLocal());
    addNew(VirtualRegister(CallFrameSlot::callee), data.callee);
    for (size_t i = 0; i < data.args.size(); ++i) {
        ASSERT(!data.args[i].isInJSStack() || data.args[i].virtualRegister().isLocal());
        addNew(virtualRegisterForArgument(i), data.args[i]);
    }

#if USE(JSVALUE64)
    // Record values that must end up in specific machine registers.
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (!data.registers[reg].isSet())
            continue;

        if (reg.isGPR())
            addNew(JSValueRegs(reg.gpr()), data.registers[reg]);
        else
            addNew(reg.fpr(), data.registers[reg]);
    }

    // If a register already holds the TagTypeNumber constant, keep it locked
    // so boxing can reuse it.
    m_tagTypeNumber = data.tagTypeNumber;
    if (m_tagTypeNumber != InvalidGPRReg)
        lockGPR(m_tagTypeNumber);
#endif
}
// Debug printer: renders the old and new frames side by side (marking the
// "danger zone" with X borders), then the live/wanted registers, the locked
// register set, and the current addressing mode. For logging only.
void CallFrameShuffler::dump(PrintStream& out) const
{
    static const char* delimiter             = " +-------------------------------+ ";
    static const char* dangerDelimiter       = " X-------------------------------X ";
    static const char* dangerBoundsDelimiter = " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
    static const char* emptySpace            = "                                   ";
    out.print("          ");
    out.print("           Old frame               ");
    out.print("           New frame               ");
    out.print("\n");
    int totalSize = m_alignedOldFrameSize + std::max(numLocals(), m_alignedNewFrameSize) + 3;
    for (int i = 0; i < totalSize; ++i) {
        // `old` walks the old frame top-down; `newReg` is the slot of the new
        // frame that will occupy the same machine address after the shuffle.
        VirtualRegister old { m_alignedOldFrameSize - i - 1 };
        VirtualRegister newReg { old + m_frameDelta };

        // Skip rows where neither frame has anything to show.
        if (!isValidOld(old) && old != firstOld() - 1
            && !isValidNew(newReg) && newReg != firstNew() - 1)
            continue;

        out.print("        ");
        // Border row above this slot (left column: old frame).
        if (dangerFrontier() >= firstNew()
            && (newReg == dangerFrontier() || newReg == firstNew() - 1))
            out.print(dangerBoundsDelimiter);
        else if (isValidOld(old))
            out.print(isValidNew(newReg) && isDangerNew(newReg) ? dangerDelimiter : delimiter);
        else if (old == firstOld() - 1)
            out.print(delimiter);
        else
            out.print(emptySpace);
        // Border row above this slot (right column: new frame).
        if (dangerFrontier() >= firstNew()
            && (newReg == dangerFrontier() || newReg == firstNew() - 1))
            out.print(dangerBoundsDelimiter);
        else if (isValidNew(newReg) || newReg == firstNew() - 1)
            out.print(isDangerNew(newReg) ? dangerDelimiter : delimiter);
        else
            out.print(emptySpace);
        out.print("\n");
        // Annotate where sp and fp currently point in the old frame.
        if (old == firstOld())
            out.print(" sp --> ");
        else if (!old.offset())
            out.print(" fp --> ");
        else
            out.print("        ");
        // Old-frame cell contents.
        if (isValidOld(old)) {
            if (getOld(old)) {
                auto str = toCString(old);
                if (isValidNew(newReg) && isDangerNew(newReg))
                    out.printf(" X      %18s       X ", str.data());
                else
                    out.printf(" |      %18s       | ", str.data());
            } else if (isValidNew(newReg) && isDangerNew(newReg))
                out.printf(" X%30s X ", "");
            else
                out.printf(" |%30s | ", "");
        } else
            out.print(emptySpace);
        // New-frame cell contents: what will be written there, if anything.
        if (isValidNew(newReg)) {
            const char d = isDangerNew(newReg) ? 'X' : '|';
            auto str = toCString(newReg);
            if (getNew(newReg)) {
                if (getNew(newReg)->recovery().isConstant())
                    out.printf(" %c%8s <-           constant %c ", d, str.data(), d);
                else {
                    auto recoveryStr = toCString(getNew(newReg)->recovery());
                    out.printf(" %c%8s <- %18s %c ", d, str.data(),
                        recoveryStr.data(), d);
                }
            } else if (newReg == VirtualRegister { CallFrameSlot::argumentCount })
                out.printf(" %c%8s <- %18zu %c ", d, str.data(), argCount(), d);
            else
                out.printf(" %c%30s %c ", d, "", d);
        } else
            out.print(emptySpace);
        // Annotate where the new sp/fp will be.
        if (newReg == firstNew() - m_newFrameOffset && !isSlowPath())
            out.print(" <-- new sp before jump (current ", m_newFrameBase, ") ");
        if (newReg == firstNew())
            out.print(" <-- new fp after prologue");
        out.print("\n");
    }
    out.print("          ");
    out.print("        Live registers             ");
    out.print("        Wanted registers           ");
    out.print("\n");
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        CachedRecovery* oldCachedRecovery { m_registers[reg] };
        CachedRecovery* newCachedRecovery { m_newRegisters[reg] };
        if (!oldCachedRecovery && !newCachedRecovery)
            continue;
        out.print("          ");
        if (oldCachedRecovery) {
            auto str = toCString(reg);
            out.printf("         %8s                  ", str.data());
        } else
            out.print(emptySpace);
#if USE(JSVALUE32_64)
        // On 32-bit, a GPR may want either the tag or the payload half.
        if (newCachedRecovery) {
            JSValueRegs wantedJSValueRegs { newCachedRecovery->wantedJSValueRegs() };
            if (reg.isFPR())
                out.print(reg, " <- ", newCachedRecovery->recovery());
            else {
                if (reg.gpr() == wantedJSValueRegs.tagGPR())
                    out.print(reg.gpr(), " <- tag(", newCachedRecovery->recovery(), ")");
                else
                    out.print(reg.gpr(), " <- payload(", newCachedRecovery->recovery(), ")");
            }
        }
#else
        if (newCachedRecovery)
            out.print("         ", reg, " <- ", newCachedRecovery->recovery());
#endif
        out.print("\n");
    }
    out.print("  Locked registers: ");
    bool firstLocked { true };
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        if (m_lockedRegisters.get(reg)) {
            out.print(firstLocked ? "" : ", ", reg);
            firstLocked = false;
        }
    }
    out.print("\n");

    if (isSlowPath())
        out.print("  Using fp-relative addressing for slow path call\n");
    else
        out.print("  Using sp-relative addressing for jump (using ", m_newFrameBase, " as new sp)\n");
    if (m_oldFrameOffset)
        out.print("   Old frame offset is ", m_oldFrameOffset, "\n");
    if (m_newFrameOffset)
        out.print("   New frame offset is ", m_newFrameOffset, "\n");
#if USE(JSVALUE64)
    if (m_tagTypeNumber != InvalidGPRReg)
        out.print("   TagTypeNumber is currently in ", m_tagTypeNumber, "\n");
#endif
}
// Returns the CachedRecovery tracked for the location described by
// `recovery`, or nullptr if none is associated with that location.
// Constants are not tracked and must not be passed in.
CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery)
{
    ASSERT(!recovery.isConstant());

    CachedRecovery* result = nullptr;
    if (recovery.isInGPR())
        result = m_registers[recovery.gpr()];
    else if (recovery.isInFPR())
        result = m_registers[recovery.fpr()];
#if USE(JSVALUE32_64)
    else if (recovery.technique() == InPair) {
        // Both halves of a tag/payload pair must track the same recovery.
        ASSERT(m_registers[recovery.tagGPR()] == m_registers[recovery.payloadGPR()]);
        result = m_registers[recovery.payloadGPR()];
    }
#endif
    else {
        // Anything else must live in a stack slot of the old frame.
        ASSERT(recovery.isInJSStack());
        result = getOld(recovery.virtualRegister());
    }
    return result;
}
// Associates `cachedRecovery` with the location described by `recovery`
// (which may be nullptr to clear the association) and returns it.
// Constants are not tracked and must not be passed in.
CachedRecovery* CallFrameShuffler::setCachedRecovery(ValueRecovery recovery, CachedRecovery* cachedRecovery)
{
    ASSERT(!recovery.isConstant());

    if (recovery.isInGPR()) {
        m_registers[recovery.gpr()] = cachedRecovery;
        return cachedRecovery;
    }
    if (recovery.isInFPR()) {
        m_registers[recovery.fpr()] = cachedRecovery;
        return cachedRecovery;
    }
#if USE(JSVALUE32_64)
    if (recovery.technique() == InPair) {
        // Keep both halves of the tag/payload pair in sync.
        m_registers[recovery.tagGPR()] = cachedRecovery;
        m_registers[recovery.payloadGPR()] = cachedRecovery;
        return cachedRecovery;
    }
#endif
    // Anything else must live in a stack slot of the old frame.
    ASSERT(recovery.isInJSStack());
    setOld(recovery.virtualRegister(), cachedRecovery);
    return cachedRecovery;
}
// Spills a register-resident value into a free slot of the old frame so
// that the register can be reused. If no safe slot exists, the frame is
// extended once and the spill retried.
void CallFrameShuffler::spill(CachedRecovery& cachedRecovery)
{
    ASSERT(!isSlowPath());
    ASSERT(cachedRecovery.recovery().isInRegisters());

    // Look for the first empty old-frame slot that lies strictly below the
    // start of the new frame (slots at or above it will be overwritten).
    VirtualRegister spillSlot { 0 };
    for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot += 1) {
        if (slot >= newAsOld(firstNew()))
            break;

        if (getOld(slot))
            continue;

        spillSlot = slot;
        break;
    }

    // No usable slot found: extend the old frame and retry. The
    // RELEASE_ASSERT guarantees we only recurse once.
    if (spillSlot >= newAsOld(firstNew()) || !spillSlot.isLocal()) {
        RELEASE_ASSERT(!m_didExtendFrame);
        extendFrameIfNeeded();
        spill(cachedRecovery);
        return;
    }

    if (verbose)
        dataLog("   * Spilling ", cachedRecovery.recovery(), " into ", spillSlot, "\n");
    auto format = emitStore(cachedRecovery, addressForOld(spillSlot));
    ASSERT(format != DataFormatNone);
    // The value now lives in the stack slot; update its recovery to match.
    updateRecovery(cachedRecovery, ValueRecovery::displacedInJSStack(spillSlot, format));
}
// Debug-only: emits code verifying that sp - fp equals the expected
// -numLocals() * sizeof(Register), aborting at runtime otherwise.
// Skipped entirely when asserts are disabled or no scratch GPR is free.
void CallFrameShuffler::emitDeltaCheck()
{
    if (ASSERT_DISABLED)
        return;

    GPRReg scratchGPR { getFreeGPR() };
    if (scratchGPR != InvalidGPRReg) {
        if (verbose)
            dataLog("  Using ", scratchGPR, " for the fp-sp delta check\n");
        // scratch = sp - fp; should be exactly -numLocals() slots.
        m_jit.move(MacroAssembler::stackPointerRegister, scratchGPR);
        m_jit.subPtr(GPRInfo::callFrameRegister, scratchGPR);
        MacroAssembler::Jump ok = m_jit.branch32(
            MacroAssembler::Equal, scratchGPR,
            MacroAssembler::TrustedImm32(-numLocals() * sizeof(Register)));
        m_jit.abortWithReason(JITUnexpectedCallFrameSize);
        ok.link(&m_jit);
    } else if (verbose)
        dataLog("  Skipping the fp-sp delta check since there is too much pressure");
}
// Grows the old frame (moving sp down) if it does not have enough free
// slots at its bottom to hold the new frame plus any required spills.
// May be called at most once per shuffle (see ASSERT).
void CallFrameShuffler::extendFrameIfNeeded()
{
    ASSERT(!m_didExtendFrame);

    // Find the lowest old-frame slot that is actually read; everything
    // below it is free space we can reuse.
    VirtualRegister firstRead { firstOld() };
    for (; firstRead <= virtualRegisterForLocal(0); firstRead += 1) {
        if (getOld(firstRead))
            break;
    }
    size_t availableSize = static_cast<size_t>(firstRead.offset() - firstOld().offset());
    size_t wantedSize = m_newFrame.size() + m_newFrameOffset;

    if (availableSize < wantedSize) {
        // Grow by a stack-alignment multiple, track the new empty slots, and
        // move sp down accordingly.
        size_t delta = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), wantedSize - availableSize);
        m_oldFrame.grow(m_oldFrame.size() + delta);
        for (size_t i = 0; i < delta; ++i)
            m_oldFrame[m_oldFrame.size() - i - 1] = nullptr;
        m_jit.subPtr(MacroAssembler::TrustedImm32(delta * sizeof(Register)), MacroAssembler::stackPointerRegister);

        // Update the bookkeeping that depends on the frame size.
        if (isSlowPath())
            m_frameDelta = numLocals() + CallerFrameAndPC::sizeInRegisters;
        else
            m_oldFrameOffset = numLocals();

        if (verbose)
            dataLogF("  Not enough space - extending the old frame %zu slot\n", delta);
    }

    m_didExtendFrame = true;
}
// Prepares the frame for a slow path call: the new frame is built above the
// old one using fp-relative addressing, with sp as the new frame base.
void CallFrameShuffler::prepareForSlowPath()
{
    ASSERT(isUndecided());
    emitDeltaCheck();

    // The new frame starts right below the old frame's locals.
    m_frameDelta = numLocals() + CallerFrameAndPC::sizeInRegisters;
    m_newFrameBase = MacroAssembler::stackPointerRegister;
    // Leave room for the CallerFrameAndPC the call itself will push.
    m_newFrameOffset = -CallerFrameAndPC::sizeInRegisters;

    if (verbose)
        dataLog("\n\nPreparing frame for slow path call:\n");

    // When coming from the FTL, the number of locals is not reliable (see
    // upstream notes); extend eagerly to be safe.
    extendFrameIfNeeded();

    if (verbose)
        dataLog(*this);

    prepareAny();

    if (verbose)
        dataLog("Ready for slow path call!\n");
}
// Prepares the frame for a tail call: the new frame replaces the caller's
// frame in place. Computes the new frame base at runtime from the actual
// argument count, restores the return address / caller fp, then shuffles.
void CallFrameShuffler::prepareForTailCall()
{
    ASSERT(isUndecided());
    emitDeltaCheck();

    // We use sp-relative addressing for the old frame during the shuffle.
    m_oldFrameBase = MacroAssembler::stackPointerRegister;
    m_oldFrameOffset = numLocals();
    m_newFrameBase = acquireGPR();
    // Per-architecture offset accounting for how the return address and
    // caller fp are stored relative to the new frame.
#if CPU(X86)
    // Copy the return PC stack slot (NEW 0) along with the frame.
    addNew(VirtualRegister { 0 },
        ValueRecovery::displacedInJSStack(VirtualRegister(0), DataFormatJS));
    m_newFrameOffset = 0;
#elif CPU(ARM) || CPU(MIPS)
    // Return address lives in a link register; one fewer slot on the stack.
    m_newFrameOffset = -1;
#elif CPU(ARM64)
    // Two fewer slots — NOTE(review): presumably because fp/lr are pushed
    // as a pair by the callee prologue; confirm against the calling convention.
    m_newFrameOffset = -2;
#elif CPU(X86_64)
    // Copy the return address slot (NEW 1) along with the frame.
    addNew(VirtualRegister { 1 },
        ValueRecovery::displacedInJSStack(VirtualRegister(1), DataFormatJS));
    m_newFrameOffset = -1;
#else
    UNREACHABLE_FOR_PLATFORM();
#endif

    if (verbose)
        dataLog("  Emitting code for computing the new frame base\n");

    // Compute the old frame's true size from the runtime argument count:
    // newFrameBase = fp + roundUp(argCount + header) * sizeof(Register),
    // clamped below by the statically known aligned old frame size.
    m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), m_newFrameBase);
    MacroAssembler::Jump argumentCountOK =
        m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase,
            MacroAssembler::TrustedImm32(m_jit.codeBlock()->numParameters()));
    m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + CallFrame::headerSizeInRegisters), m_newFrameBase);
    m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase);
    m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase);
    MacroAssembler::Jump done = m_jit.jump();
    argumentCountOK.link(&m_jit);
    m_jit.move(
        MacroAssembler::TrustedImm32(m_alignedOldFrameSize * sizeof(Register)),
        m_newFrameBase);
    done.link(&m_jit);

    // Shift down by the new frame's size to get its base address.
    m_jit.addPtr(GPRInfo::callFrameRegister, m_newFrameBase);
    m_jit.subPtr(
        MacroAssembler::TrustedImm32(
            (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)),
        m_newFrameBase);

    // Restore the return address into its register on architectures that
    // keep it there, then restore the caller's frame pointer.
#if CPU(ARM) || CPU(ARM64)
    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
        MacroAssembler::linkRegister);
#elif CPU(MIPS)
    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
        MacroAssembler::returnAddressRegister);
#endif
    m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister),
        MacroAssembler::framePointerRegister);

    if (verbose)
        dataLog("Preparing frame for tail call:\n", *this);

    prepareAny();

#if CPU(X86)
    // On x86, the return address was shuffled as a stack slot; pop the
    // simulated call frame register slot so sp lines up for the jump.
    if (verbose)
        dataLog("  Simulating pop of the call frame register\n");
    m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*)), MacroAssembler::stackPointerRegister);
#endif

    if (verbose)
        dataLog("Ready for tail call!\n");
}
// Attempts to write `cachedRecovery` to all of its target slots in the new
// frame. Returns false if the value cannot currently be loaded and boxed
// (e.g. no free register); returns true once all targets are satisfied.
bool CallFrameShuffler::tryWrites(CachedRecovery& cachedRecovery)
{
    ASSERT(m_newFrameBase != InvalidGPRReg);

    // Fast path: on a slow path call, a stack value whose single target is
    // its own current slot needs no move at all.
    if (isSlowPath() && cachedRecovery.recovery().isInJSStack()
        && cachedRecovery.targets().size() == 1
        && newAsOld(cachedRecovery.targets()[0]) == cachedRecovery.recovery().virtualRegister()) {
        cachedRecovery.clearTargets();
        if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
            clearCachedRecovery(cachedRecovery.recovery());
        return true;
    }

    if (!canLoadAndBox(cachedRecovery))
        return false;

    emitLoad(cachedRecovery);
    emitBox(cachedRecovery);
    ASSERT(cachedRecovery.recovery().isInRegisters()
        || cachedRecovery.recovery().isConstant());

    if (verbose)
        dataLog("   * Storing ", cachedRecovery.recovery());
    for (size_t i = 0; i < cachedRecovery.targets().size(); ++i) {
        VirtualRegister target { cachedRecovery.targets()[i] };
        // Writing into the danger zone would clobber a value not yet read.
        ASSERT(!isDangerNew(target));
        if (verbose)
            dataLog(!i ? " into " : ", and ", "NEW ", target);
        emitStore(cachedRecovery, addressForNew(target));
        setNew(target, nullptr);
    }
    if (verbose)
        dataLog("\n");
    cachedRecovery.clearTargets();
    // Drop the tracking entry entirely if no register still wants the value.
    if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
        clearCachedRecovery(cachedRecovery.recovery());

    return true;
}
// Repeatedly performs all writes above the danger frontier (slots whose
// writes cannot clobber unread old-frame values). Each pass may free
// registers and retract the frontier, enabling more writes; loops until the
// frontier stops moving. Returns true if no write attempt failed.
bool CallFrameShuffler::performSafeWrites()
{
    VirtualRegister firstSafe;
    VirtualRegister end { lastNew() + 1 };
    Vector<VirtualRegister> failures;

    // Writes below the danger frontier could clobber values we need; only
    // slots in [dangerFrontier() + 1, end) are safe this pass.
    do {
        firstSafe = dangerFrontier() + 1;
        if (verbose)
            dataLog("  Trying safe writes (between NEW ", firstSafe, " and NEW ", end - 1, ")\n");
        bool didProgress = false;
        for (VirtualRegister reg = firstSafe; reg < end; reg += 1) {
            CachedRecovery* cachedRecovery = getNew(reg);
            if (!cachedRecovery) {
                if (verbose)
                    dataLog("   + ", reg, " is OK.\n");
                continue;
            }
            // Skip values with dangerous writes or register destinations;
            // those are handled later by prepareAny().
            if (!hasOnlySafeWrites(*cachedRecovery)) {
                if (verbose) {
                    dataLog("   - ", cachedRecovery->recovery(), " writes to NEW ", reg,
                        " but also has dangerous writes.\n");
                }
                continue;
            }
            if (cachedRecovery->wantedJSValueRegs()) {
                if (verbose) {
                    dataLog("   - ", cachedRecovery->recovery(), " writes to NEW ", reg,
                        " but is also needed in registers.\n");
                }
                continue;
            }
            if (cachedRecovery->wantedFPR() != InvalidFPRReg) {
                if (verbose) {
                    dataLog("   - ", cachedRecovery->recovery(), " writes to NEW ", reg,
                        " but is also needed in an FPR.\n");
                }
                continue;
            }
            if (!tryWrites(*cachedRecovery)) {
                if (verbose)
                    dataLog("   - Unable to write to NEW ", reg, " from ", cachedRecovery->recovery(), "\n");
                failures.append(reg);
            }
            didProgress = true;
        }
        end = firstSafe;

        // If we made progress and now have a free register, retry the
        // earlier failures — they may succeed now.
        if (didProgress && hasFreeRegister()) {
            Vector<VirtualRegister> stillFailing;
            for (VirtualRegister failed : failures) {
                CachedRecovery* cachedRecovery = getNew(failed);
                if (!cachedRecovery)
                    continue;
                ASSERT(hasOnlySafeWrites(*cachedRecovery)
                    && !cachedRecovery->wantedJSValueRegs()
                    && cachedRecovery->wantedFPR() == InvalidFPRReg);
                if (!tryWrites(*cachedRecovery))
                    stillFailing.append(failed);
            }
            failures = WTFMove(stillFailing);
        }
        if (verbose && firstSafe != dangerFrontier() + 1)
            dataLog("  We freed up danger slots!\n");
    } while (firstSafe != dangerFrontier() + 1);

    return failures.isEmpty();
}
// Core shuffling algorithm. After the addressing mode has been decided
// (slow path or tail call), this drains the danger zone top-down (loading
// each endangered old-frame value before its slot is overwritten), performs
// all remaining writes, materializes register-destined values, stores the
// argument count, switches sp for tail calls, and finally moves wanted
// values into their exact registers.
void CallFrameShuffler::prepareAny()
{
    ASSERT(!isUndecided());

    updateDangerFrontier();

    // First, write anything above the danger frontier that can be written
    // without clobbering unread values.
    performSafeWrites();

    // Walk the danger zone from the frontier down. At each step, the slot at
    // the frontier holds an old-frame value that must be loaded into a
    // register before we may store over it.
    for (VirtualRegister reg = dangerFrontier(); reg >= firstNew(); reg -= 1) {
        if (reg == dangerFrontier()) {
            if (verbose)
                dataLog("  Next slot (NEW ", reg, ") is the danger frontier\n");
            CachedRecovery* cachedRecovery { getOld(newAsOld(dangerFrontier())) };
            ASSERT(cachedRecovery);
            // Force the endangered value into registers so the slot is free.
            ensureLoad(*cachedRecovery);
            emitLoad(*cachedRecovery);
            ensureBox(*cachedRecovery);
            emitBox(*cachedRecovery);
            if (hasOnlySafeWrites(*cachedRecovery))
                tryWrites(*cachedRecovery);
        } else if (verbose)
            dataLog("  Next slot is NEW ", reg, "\n");

        // The slot itself is now safe to write.
        ASSERT(!isDangerNew(reg));
        CachedRecovery* cachedRecovery = getNew(reg);
        if (!cachedRecovery) {
            if (verbose)
                dataLog("   + ", reg, " is OK\n");
            continue;
        }
        if (canLoadAndBox(*cachedRecovery) && hasOnlySafeWrites(*cachedRecovery)
            && !cachedRecovery->wantedJSValueRegs()
            && cachedRecovery->wantedFPR() == InvalidFPRReg) {
            emitLoad(*cachedRecovery);
            emitBox(*cachedRecovery);
            bool writesOK = tryWrites(*cachedRecovery);
            ASSERT_UNUSED(writesOK, writesOK);
        } else if (verbose)
            dataLog("   - ", cachedRecovery->recovery(), " can't be handled just yet.\n");
    }

    // The danger zone is now clear; every remaining write is safe.
    ASSERT(dangerFrontier() < firstNew());

    if (verbose)
        dataLog("  Danger zone is clear, performing remaining writes.\n");
    for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
        CachedRecovery* cachedRecovery { getNew(reg) };
        if (!cachedRecovery)
            continue;
        emitLoad(*cachedRecovery);
        emitBox(*cachedRecovery);
        bool writesOK = tryWrites(*cachedRecovery);
        ASSERT_UNUSED(writesOK, writesOK);
    }

#if USE(JSVALUE64)
    // Free the TagTypeNumber register early if another value wants it.
    if (m_tagTypeNumber != InvalidGPRReg && m_newRegisters[m_tagTypeNumber])
        releaseGPR(m_tagTypeNumber);
#endif

    // Load register-destined values (their exact placement happens last).
    if (verbose)
        dataLog("  Loading wanted registers into registers\n");
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        CachedRecovery* cachedRecovery { m_newRegisters[reg] };
        if (!cachedRecovery)
            continue;
        emitLoad(*cachedRecovery);
        emitBox(*cachedRecovery);
        ASSERT(cachedRecovery->targets().isEmpty());
    }

#if USE(JSVALUE64)
    if (m_tagTypeNumber != InvalidGPRReg)
        releaseGPR(m_tagTypeNumber);
#endif

    if (verbose)
        dataLog("  Callee frame is fully set up\n");
    // Sanity check: every target slot written, nothing still on the stack.
    if (!ASSERT_DISABLED) {
        for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1)
            ASSERT_UNUSED(reg, !getNew(reg));

        for (CachedRecovery* cachedRecovery : m_cachedRecoveries) {
            ASSERT_UNUSED(cachedRecovery, cachedRecovery->targets().isEmpty());
            ASSERT(!cachedRecovery->recovery().isInJSStack());
        }
    }

    // Store the argument count: zero tag, then the passed-arguments payload.
    if (verbose)
        dataLog("  * Storing the argument count into ", VirtualRegister { CallFrameSlot::argumentCount }, "\n");
    m_jit.store32(MacroAssembler::TrustedImm32(0),
        addressForNew(VirtualRegister { CallFrameSlot::argumentCount }).withOffset(TagOffset));
    RELEASE_ASSERT(m_numPassedArgs != UINT_MAX);
    m_jit.store32(MacroAssembler::TrustedImm32(m_numPassedArgs),
        addressForNew(VirtualRegister { CallFrameSlot::argumentCount }).withOffset(PayloadOffset));

    // For tail calls, switch sp to the new frame base and free the register
    // that was holding it.
    if (!isSlowPath()) {
        ASSERT(m_newFrameBase != MacroAssembler::stackPointerRegister);
        if (verbose)
            dataLog("  Releasing the new frame base pointer\n");
        m_jit.move(m_newFrameBase, MacroAssembler::stackPointerRegister);
        releaseGPR(m_newFrameBase);
    }

    // Finally, shuffle register-resident values into their exact wanted
    // registers.
    if (verbose)
        dataLog("  Ensuring wanted registers are in the right register\n");
    for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
        CachedRecovery* cachedRecovery { m_newRegisters[reg] };
        if (!cachedRecovery)
            continue;
        emitDisplace(*cachedRecovery);
    }
}
}
#endif // ENABLE(JIT)