#include "config.h"
#include "DFGJITCode.h"
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
#include "JSCInlines.h"
#include "TrackedReferences.h"
namespace JSC { namespace DFG {
// Constructs an empty DFG JITCode, tagging the base DirectJITCode with
// DFGJIT so JITCode::jitType() identifies this as the DFG tier.
JITCode::JITCode()
    : DirectJITCode(DFGJIT)
#if ENABLE(FTL_JIT)
    // FTL tier-up bookkeeping: no OSR-entry retries yet, and OSR entry into
    // FTL code has not been abandoned.
    , osrEntryRetry(0)
    , abandonOSREntry(false)
#endif // ENABLE(FTL_JIT)
{
}
// Out-of-line destructor; members (OSR tables, minified graph, event stream)
// tear themselves down via their own destructors.
JITCode::~JITCode()
{
}
// Gives generic JITCode clients access to the DFG-specific common data.
CommonData* JITCode::dfgCommon()
{
    return &common;
}
// Checked-downcast hook: since this really is DFG JITCode, just return this.
JITCode* JITCode::dfg()
{
    return this;
}
// Releases excess vector capacity in all per-compilation metadata once
// compilation has finished and final sizes are known.
void JITCode::shrinkToFit()
{
    common.shrinkToFit();
    osrEntry.shrinkToFit();
    osrExit.shrinkToFit();
    speculationRecovery.shrinkToFit();
    // The minified graph needs preparation (presumably sorting/indexing for
    // lookup) in addition to shrinking -- hence the different entry point.
    minifiedDFG.prepareAndShrink();
    variableEventStream.shrinkToFit();
}
// Reconstructs, for every operand at the given code origin, a ValueRecovery
// describing where/how that operand's value can be recovered, by replaying
// the variable event stream up to streamIndex against the minified DFG.
void JITCode::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<ValueRecovery>& result)
{
    variableEventStream.reconstruct(
        codeBlock, codeOrigin, minifiedDFG, streamIndex, result);
}
// Reconstructs the concrete JSValue of every operand at the given code
// origin. First computes abstract ValueRecoveries via the other reconstruct()
// overload, then materializes each recovery against the live ExecState.
void JITCode::reconstruct(
    ExecState* exec, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<JSValue>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);

    // Shape the result like the recoveries, then recover each operand's value.
    result = Operands<JSValue>(OperandsLike, recoveries);
    for (size_t index = 0; index < result.size(); ++index)
        result[index] = recoveries[index].recover(exec);
}
#if ENABLE(FTL_JIT)
// Returns true if the FTL tier-up counter has crossed its threshold,
// updating the counter state against the baseline CodeBlock as it checks.
bool JITCode::checkIfOptimizationThresholdReached(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    return tierUpCounter.checkIfThresholdCrossedAndSet(codeBlock->baselineVersion());
}
// Requests FTL tier-up on the very next invocation by zeroing the
// tier-up counter's threshold.
void JITCode::optimizeNextInvocation(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing next invocation.\n");
    tierUpCounter.setNewThreshold(0, codeBlock->baselineVersion());
}
// Indefinitely defers FTL tier-up (e.g. after an FTL compile is known to be
// pointless or to have failed); the counter will not fire until reset.
void JITCode::dontOptimizeAnytimeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": Not FTL-optimizing anytime soon.\n");
    tierUpCounter.deferIndefinitely();
}
// Schedules FTL tier-up after the standard warm-up period: sets the tier-up
// counter to the warm-up threshold, scaled to this code block by the
// baseline's adjustedCounterValue().
void JITCode::optimizeAfterWarmUp(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing after warm-up.\n");
    CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();
    auto threshold = baselineCodeBlock->adjustedCounterValue(
        Options::thresholdForFTLOptimizeAfterWarmUp());
    tierUpCounter.setNewThreshold(threshold, baselineCodeBlock);
}
// Schedules FTL tier-up soon: like optimizeAfterWarmUp(), but uses the more
// aggressive "optimize soon" threshold option.
void JITCode::optimizeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": FTL-optimizing soon.\n");
    CodeBlock* baselineCodeBlock = codeBlock->baselineVersion();
    auto threshold = baselineCodeBlock->adjustedCounterValue(
        Options::thresholdForFTLOptimizeSoon());
    tierUpCounter.setNewThreshold(threshold, baselineCodeBlock);
}
// Forces the tier-up counter onto its slow path from another thread, so the
// mutator will take the FTL-entry slow path on its next check.
void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    if (Options::verboseOSR())
        dataLog(*codeBlock, ": Forcing slow path concurrently for FTL entry.\n");
    tierUpCounter.forceSlowPathConcurrently();
}
// Reacts to the outcome of an FTL compilation attempt by retuning the
// tier-up counter and recording the outcome on the baseline CodeBlock.
void JITCode::setOptimizationThresholdBasedOnCompilationResult(
    CodeBlock* codeBlock, CompilationResult result)
{
    ASSERT(codeBlock->jitType() == JITCode::DFGJIT);
    switch (result) {
    case CompilationSuccessful:
        // FTL code is ready: enter it on the next invocation.
        optimizeNextInvocation(codeBlock);
        codeBlock->baselineVersion()->m_hasBeenCompiledWithFTL = true;
        return;
    case CompilationFailed:
        // Don't keep retrying a compile that failed; remember the failure.
        dontOptimizeAnytimeSoon(codeBlock);
        codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
        return;
    case CompilationDeferred:
        // Compilation was postponed; try again after another warm-up period.
        optimizeAfterWarmUp(codeBlock);
        return;
    case CompilationInvalidated:
        // The compiled code was invalidated before use; count it as a
        // reoptimization so heuristics back off, then re-warm.
        codeBlock->baselineVersion()->countReoptimization();
        optimizeAfterWarmUp(codeBlock);
        return;
    }
    // All CompilationResult values are handled above.
    RELEASE_ASSERT_NOT_REACHED();
}
#endif // ENABLE(FTL_JIT)
// Debug validation: checks that every GC reference held by this JITCode's
// metadata (common data, OSR-entry expected values, minified graph) is
// present in the tracked-references set.
void JITCode::validateReferences(const TrackedReferences& trackedReferences)
{
    common.validateReferences(trackedReferences);

    for (OSREntryData& entry : osrEntry) {
        for (unsigned i = entry.m_expectedValues.size(); i--;)
            entry.m_expectedValues[i].validateReferences(trackedReferences);
    }

    minifiedDFG.validateReferences(trackedReferences);
}
} }
#endif // ENABLE(DFG_JIT)