// DFGOSRExitCompilerCommon.cpp
#include "config.h"
#include "DFGOSRExitCompilerCommon.h"
#if ENABLE(DFG_JIT)
#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"
#include "StructureStubInfo.h"
namespace JSC { namespace DFG {
// Emits code that records an OSR exit firing and decides what to do about it:
// bump the per-exit-site counter, and — for exit kinds that can jettison the
// optimized code — either trigger reoptimization immediately or reset the
// baseline tier-up counters so the function must warm up again.
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    // Count this particular exit site firing.
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    // Exit kinds that can never cause jettison need no further bookkeeping.
    if (!exitKindMayJettison(exit.m_kind))
        return;

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    // Increment the CodeBlock-wide OSR exit counter. The incremented value is
    // kept live in regT2 for the threshold comparison below.
    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

    // From here on regT0 holds the baseline CodeBlock.
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);

    // If the baseline execute counter is already non-negative, reoptimize
    // right away rather than consulting the exit-count thresholds.
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    // Choose the exit-count threshold (into regT1). If any baseline code block
    // on the inline stack was ever entered from a loop, use the from-loop
    // threshold instead of the default one.
    AssemblyHelpers::JumpList loopThreshold;

    for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->directCaller.inlineCallFrame) {
        loopThreshold.append(
            jit.branchTest8(
                AssemblyHelpers::NonZero,
                AssemblyHelpers::AbsoluteAddress(
                    inlineCallFrame->baselineCodeBlock->ownerScriptExecutable()->addressOfDidTryToEnterInLoop())));
    }

    jit.move(
        AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()),
        GPRInfo::regT1);

    if (!loopThreshold.empty()) {
        AssemblyHelpers::Jump done = jit.jump();

        loopThreshold.link(&jit);
        jit.move(
            AssemblyHelpers::TrustedImm32(
                jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()),
            GPRInfo::regT1);

        done.link(&jit);
    }

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);

    // Reoptimize now: call triggerReoptimizationNow(baselineCodeBlock, &exit).
    reoptimizeNow.link(&jit);
#if !NUMBER_OF_ARGUMENT_REGISTERS
    // No argument registers: pass arguments on the stack.
    jit.poke(GPRInfo::regT0);
    jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    // Too few failures so far: reset the baseline tier-up counters so that the
    // baseline code must re-accumulate executions before the next optimizing
    // compile is attempted.
    tooFewFails.link(&jit);

    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        // Bug fix: "#endif" was fused onto the end of the assignment line.
        // A preprocessing directive must be the first token sequence on its
        // own line, so the original did not preprocess correctly.
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        clippedValue = 0; // Silence "may be used uninitialized" on compilers
                          // that consider the unreachable fallthrough.
#endif
        break;
    }

    // regT0 still holds the baseline CodeBlock on this path (the call above is
    // only reached via reoptimizeNow, which jumps past these stores).
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}
// Materializes real call frames for every inlined call between the exit's
// code origin and the machine frame, so baseline code — which knows nothing
// about DFG inlining — sees an ordinary chain of frames. For each inlined
// frame this fills in: the baseline CodeBlock slot, the return PC, the caller
// frame pointer, the callee-save registers, the argument count, the call-site
// index tag, and (for non-closure calls) the constant callee.
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
// The outermost (machine) frame gets the baseline CodeBlock.
jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)CallFrameSlot::codeBlock));
const CodeOrigin* codeOrigin;
// Walk from the innermost inlined frame outward. getCallerSkippingTailCalls
// skips callers that were eliminated by tail calls.
for (codeOrigin = &exit.m_codeOrigin; codeOrigin && codeOrigin->inlineCallFrame; codeOrigin = codeOrigin->inlineCallFrame->getCallerSkippingTailCalls()) {
InlineCallFrame* inlineCallFrame = codeOrigin->inlineCallFrame;
CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(*codeOrigin);
InlineCallFrame::Kind trueCallerCallKind;
CodeOrigin* trueCaller = inlineCallFrame->getCallerSkippingTailCalls(&trueCallerCallKind);
GPRReg callerFrameGPR = GPRInfo::callFrameRegister;
if (!trueCaller) {
// No true caller: this frame tail-called away its caller, so it inherits
// the machine frame's return PC and caller-frame pointer.
ASSERT(inlineCallFrame->isTail());
jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT3);
jit.storePtr(GPRInfo::regT3, AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
jit.loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), GPRInfo::regT3);
callerFrameGPR = GPRInfo::regT3;
} else {
// Determine the baseline return address for when this inlined callee
// returns: either the call link's return location (ordinary calls) or the
// IC's done location (getter/setter calls).
CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(*trueCaller);
unsigned callBytecodeIndex = trueCaller->bytecodeIndex;
void* jumpTarget = nullptr;
switch (trueCallerCallKind) {
case InlineCallFrame::Call:
case InlineCallFrame::Construct:
case InlineCallFrame::CallVarargs:
case InlineCallFrame::ConstructVarargs:
case InlineCallFrame::TailCall:
case InlineCallFrame::TailCallVarargs: {
CallLinkInfo* callLinkInfo =
baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
RELEASE_ASSERT(callLinkInfo);
jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
break;
}
case InlineCallFrame::GetterCall:
case InlineCallFrame::SetterCall: {
StructureStubInfo* stubInfo =
baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
RELEASE_ASSERT(stubInfo);
jumpTarget = stubInfo->doneLocation().executableAddress();
break;
}
default:
RELEASE_ASSERT_NOT_REACHED();
}
// If the caller is itself inlined, the caller-frame pointer is computed
// from its stack offset within the machine frame.
if (trueCaller->inlineCallFrame) {
jit.addPtr(
AssemblyHelpers::TrustedImm32(trueCaller->inlineCallFrame->stackOffset * sizeof(EncodedJSValue)),
GPRInfo::callFrameRegister,
GPRInfo::regT3);
callerFrameGPR = GPRInfo::regT3;
}
jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
}
// Fill in the CodeBlock slot of this inlined frame.
jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::codeBlock)));
// Restore/copy callee-save registers: a tail-deleted frame (no trueCaller)
// copies them from the base frame instead of using current contents.
jit.emitSaveOrCopyCalleeSavesFor(
baselineCodeBlock,
static_cast<VirtualRegister>(inlineCallFrame->stackOffset),
trueCaller ? AssemblyHelpers::UseExistingTagRegisterContents : AssemblyHelpers::CopyBaselineCalleeSavedRegistersFromBaseFrame,
GPRInfo::regT2);
// Varargs frames get their argument count filled in elsewhere at runtime;
// only constant-arity frames can store it here.
if (!inlineCallFrame->isVarargs())
jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
#if USE(JSVALUE64)
jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
// The call-site index lives in the tag half of the argument-count slot.
uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
// Closure calls have their callee materialized elsewhere; constant callees
// can be stored directly.
if (!inlineCallFrame->isClosureCall)
jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#else // USE(JSVALUE64) // so this is the 32-bit part
jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
// On 32-bit, the call-site index is derived from the instruction pointer.
Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin->bytecodeIndex;
uint32_t locationBits = CallSiteIndex(instruction).bits();
jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount)));
jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
if (!inlineCallFrame->isClosureCall)
jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + CallFrameSlot::callee)));
#endif // USE(JSVALUE64) // ending the #else part, so directly above is the 32-bit part
}
// Finally, stamp the machine frame's own call-site index (codeOrigin is now
// the outermost, non-inlined origin; it can be null if the walk ended via a
// tail-deleted caller).
if (codeOrigin) {
#if USE(JSVALUE64)
uint32_t locationBits = CallSiteIndex(codeOrigin->bytecodeIndex).bits();
#else
Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin->bytecodeIndex;
uint32_t locationBits = CallSiteIndex(instruction).bits();
#endif
jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(CallFrameSlot::argumentCount)));
}
}
// Emits an inline GC write-barrier for `owner`: if the fast-path check
// (barrierBranchWithoutFence) says the object is already remembered or in
// Eden, skip the slow path; otherwise call operationOSRWriteBarrier(exec,
// owner). `scratch` is clobbered to hold the callee address.
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.barrierBranchWithoutFence(owner);
#if CPU(X86)
// NOTE(review): presumably reserves stack space for the 32-bit x86 calling
// convention / alignment around the call below — confirm against
// setupArgumentsWithExecState's x86 contract.
jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif
jit.setupArgumentsWithExecState(owner);
jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
jit.call(scratch);
#if CPU(X86)
// Undo the stack adjustment made above.
jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif
ownerIsRememberedOrInEden.link(&jit);
}
// Final stage of an OSR exit: write-barrier the baseline code blocks we are
// transferring control to, rebase the frame pointer for the exit's inline
// frame, reset the stack pointer for the baseline frame, and jump to the
// baseline machine code corresponding to the exit's bytecode index.
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
jit.memoryFence();
// Barrier the baseline alternative of the code block being exited.
jit.move(
AssemblyHelpers::TrustedImmPtr(
jit.codeBlock()->baselineAlternative()), GPRInfo::argumentGPR1);
osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);
// Likewise barrier every inlined baseline code block, if any inlining
// occurred in this DFG/FTL code block.
InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
if (inlineCallFrames) {
for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
jit.move(
AssemblyHelpers::TrustedImmPtr(
inlineCallFrame->baselineCodeBlock.get()), GPRInfo::argumentGPR1);
osrWriteBarrier(jit, GPRInfo::argumentGPR1, GPRInfo::nonArgGPR0);
}
}
// If the exit origin is inside an inlined frame, rebase the frame register
// to that frame's slice of the machine frame.
if (exit.m_codeOrigin.inlineCallFrame)
jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);
// Find the baseline machine-code address for the exit's bytecode index via
// binary search of the decoded bytecode->machine-code map.
CodeBlock* codeBlockForExit = jit.baselineCodeBlockFor(exit.m_codeOrigin);
Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(codeBlockForExit);
BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);
ASSERT(mapping);
ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);
void* jumpTarget = codeBlockForExit->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
// Set the stack pointer to where the baseline code block expects it.
jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(codeBlockForExit) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);
// Exits that act as exception handlers publish the frame for the catch
// machinery before jumping.
if (exit.isExceptionHandler()) {
jit.storePtr(GPRInfo::callFrameRegister, jit.vm()->addressOfCallFrameForCatch());
}
// Transfer control to baseline code.
jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
jit.jump(GPRInfo::regT2);
}
} }
#endif // ENABLE(DFG_JIT)