// LowLevelInterpreter.cpp
#include "config.h"
#include "LowLevelInterpreter.h"
#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>
#if ENABLE(C_LOOP)
#include "Bytecodes.h"
#include "CLoopStackInlines.h"
#include "CodeBlock.h"
#include "CommonSlowPaths.h"
#include "Interpreter.h"
#include "LLIntCLoop.h"
#include "LLIntData.h"
#include "LLIntSlowPaths.h"
#include "JSCInlines.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>
using namespace JSC::LLInt;
#if ENABLE(UNIFIED_AND_FREEZABLE_CONFIG_RECORD)
using WebConfig::g_config;
#endif
// In C Loop mode the offline assembler's output (LLIntAssembly.h) is
// #included below as C++ source. These macros control how its begin/end
// markers, labels, and opcode entry points are rendered in that mode.
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END
// Optional per-opcode execution tracing.
#if ENABLE(OPCODE_TRACING)
#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
#else
#define TRACE_OPCODE(opcode)
#endif
// Reference a label without emitting reachable code, so the compiler does
// not warn about generated labels that nothing visibly jumps to.
#define USE_LABEL(label) \
do { \
if (false) \
goto label; \
} while (false)
#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);
#define OFFLINE_ASM_GLOBAL_LABEL(label) label: USE_LABEL(label);
// Optional tracing of control flow through generated labels (debug aid).
#if ENABLE(LABEL_TRACING)
#define TRACE_LABEL(prefix, label) dataLog(#prefix, ": ", #label, "\n")
#else
#define TRACE_LABEL(prefix, label) do { } while (false);
#endif
// Without computed gotos, glue labels double as cases of the dispatch switch.
#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_GLUE_LABEL", label); USE_LABEL(label);
#else
#define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
#endif
// NOTE(review): this passes #label where OFFLINE_ASM_GLUE_LABEL passes the
// bare label; TRACE_LABEL stringizes its argument either way, so the traced
// text differs slightly — presumably benign, confirm if LABEL_TRACING is used.
#define OFFLINE_ASM_LOCAL_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_LOCAL_LABEL", #label); USE_LABEL(label);
namespace JSC {
// An emulated machine register for the C Loop interpreter. It stores a
// single pointer-sized word and provides reinterpreting accessors so the
// transliterated interpreter code can view that word as whichever type a
// given instruction needs (integer, pointer, cell, opcode, ...).
class CLoopRegister {
public:
    // Integral views of the stored word.
    ALWAYS_INLINE intptr_t i() const { return m_value; }
    ALWAYS_INLINE uintptr_t u() const { return m_value; }
    ALWAYS_INLINE int32_t i32() const { return m_value; }
    ALWAYS_INLINE uint32_t u32() const { return m_value; }
    ALWAYS_INLINE int8_t i8() const { return m_value; }
    ALWAYS_INLINE uint8_t u8() const { return m_value; }
    // Pointer views of the stored word.
    ALWAYS_INLINE intptr_t* ip() const { return bitwise_cast<intptr_t*>(m_value); }
    ALWAYS_INLINE int8_t* i8p() const { return bitwise_cast<int8_t*>(m_value); }
    ALWAYS_INLINE void* vp() const { return bitwise_cast<void*>(m_value); }
    ALWAYS_INLINE const void* cvp() const { return bitwise_cast<const void*>(m_value); }
    ALWAYS_INLINE CallFrame* callFrame() const { return bitwise_cast<CallFrame*>(m_value); }
    ALWAYS_INLINE const void* instruction() const { return bitwise_cast<const void*>(m_value); }
    ALWAYS_INLINE VM* vm() const { return bitwise_cast<VM*>(m_value); }
    ALWAYS_INLINE JSCell* cell() const { return bitwise_cast<JSCell*>(m_value); }
    ALWAYS_INLINE ProtoCallFrame* protoCallFrame() const { return bitwise_cast<ProtoCallFrame*>(m_value); }
    ALWAYS_INLINE NativeFunction nativeFunc() const { return bitwise_cast<NativeFunction>(m_value); }
#if USE(JSVALUE64)
    ALWAYS_INLINE int64_t i64() const { return m_value; }
    ALWAYS_INLINE uint64_t u64() const { return m_value; }
    ALWAYS_INLINE EncodedJSValue encodedJSValue() const { return bitwise_cast<EncodedJSValue>(m_value); }
#endif
    ALWAYS_INLINE Opcode opcode() const { return bitwise_cast<Opcode>(m_value); }

    // Implicit conversions, for convenience in the generated code.
    operator CallFrame*() { return bitwise_cast<CallFrame*>(m_value); }
    operator const Instruction*() { return bitwise_cast<const Instruction*>(m_value); }
    operator JSCell*() { return bitwise_cast<JSCell*>(m_value); }
    operator ProtoCallFrame*() { return bitwise_cast<ProtoCallFrame*>(m_value); }
    operator Register*() { return bitwise_cast<Register*>(m_value); }
    operator VM*() { return bitwise_cast<VM*>(m_value); }

    // Assignment from any pointer-sized type stores its bit pattern verbatim.
    template<typename T, typename = std::enable_if_t<sizeof(T) == sizeof(uintptr_t)>>
    ALWAYS_INLINE void operator=(T value) { m_value = bitwise_cast<uintptr_t>(value); }
    // Narrower integral types are sign- or zero-extended to the full word.
#if USE(JSVALUE64)
    ALWAYS_INLINE void operator=(int32_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint32_t value) { m_value = static_cast<uintptr_t>(value); }
#endif
    ALWAYS_INLINE void operator=(int16_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint16_t value) { m_value = static_cast<uintptr_t>(value); }
    ALWAYS_INLINE void operator=(int8_t value) { m_value = static_cast<intptr_t>(value); }
    ALWAYS_INLINE void operator=(uint8_t value) { m_value = static_cast<uintptr_t>(value); }
    ALWAYS_INLINE void operator=(bool value) { m_value = static_cast<uintptr_t>(value); }

#if USE(JSVALUE64)
    ALWAYS_INLINE double bitsAsDouble() const { return bitwise_cast<double>(m_value); }
    ALWAYS_INLINE int64_t bitsAsInt64() const { return bitwise_cast<int64_t>(m_value); }
#endif

private:
    // Poisoned so that a read of a never-written register is recognizable.
    uintptr_t m_value { static_cast<uintptr_t>(0xbadbeef0baddbeef) };
};
class CLoopDoubleRegister {
public:
template<typename T>
explicit operator T() const { return bitwise_cast<T>(m_value); }
ALWAYS_INLINE double d() const { return m_value; }
ALWAYS_INLINE int64_t bitsAsInt64() const { return bitwise_cast<int64_t>(m_value); }
ALWAYS_INLINE void operator=(double value) { m_value = value; }
template<typename T, typename = std::enable_if_t<sizeof(T) == sizeof(uintptr_t) && std::is_integral<T>::value>>
ALWAYS_INLINE void operator=(T value) { m_value = bitwise_cast<double>(value); }
private:
double m_value;
};
namespace LLInt {

#if USE(JSVALUE32_64)
// Reassembles a double from its 32-bit halves (lo = low word, hi = high word).
static double ints2Double(uint32_t lo, uint32_t hi)
{
    uint64_t bits = (static_cast<uint64_t>(hi) << 32) | static_cast<uint64_t>(lo);
    return bitwise_cast<double>(bits);
}

// Splits a double into its 32-bit halves and stores them into the given
// emulated registers.
static void double2Ints(double val, CLoopRegister& lo, CLoopRegister& hi)
{
    uint64_t bits = bitwise_cast<uint64_t>(val);
    lo = static_cast<uint32_t>(bits & 0xffffffffu);
    hi = static_cast<uint32_t>(bits >> 32);
}
#endif // USE(JSVALUE32_64)

// Unpacks a slow-path call's packed return value into the two emulated
// result registers.
static void decodeResult(SlowPathReturnType result, CLoopRegister& t0, CLoopRegister& t1)
{
    const void* first;
    const void* second;
    JSC::decodeResult(result, first, second);
    t0 = first;
    t1 = second;
}

} // namespace LLInt
// Runs the C Loop interpreter. The interpreter body itself is generated by
// the offline assembler and textually #included below (LLIntAssembly.h);
// this function supplies the emulated "CPU": the register file (t0..t5, sp,
// cfr, lr, pc, ...), the opcode dispatch mechanism (computed goto or
// switch), and the entry/exit glue.
//
// When isInitializationPass is true, nothing is executed: the opcode
// dispatch maps and the exception instruction buffer are populated and
// JSValue() is returned.
//
// Fix: the '#else' directive below was fused onto the end of the
// JSVALUE32_64 return statement; a preprocessor directive must begin its
// own line, so it has been split back out.
JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
{
    #define CAST bitwise_cast

    // One-time initialization pass: record each opcode's handler (label
    // address or OpcodeID, depending on dispatch mode) in the opcode maps.
    if (UNLIKELY(isInitializationPass)) {
        Opcode* opcodeMap = LLInt::opcodeMap();
        Opcode* opcodeMapWide16 = LLInt::opcodeMapWide16();
        Opcode* opcodeMapWide32 = LLInt::opcodeMapWide32();

#if ENABLE(COMPUTED_GOTO_OPCODES)
        // With computed gotos, an opcode value is the address of its label.
        #define OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode); \
            opcodeMapWide16[__opcode] = bitwise_cast<void*>(&&__opcode##_wide16); \
            opcodeMapWide32[__opcode] = bitwise_cast<void*>(&&__opcode##_wide32);
        #define LLINT_OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
#else
        // Without computed gotos, an opcode value is just its OpcodeID,
        // dispatched by the switch statement below.
        #define OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = __opcode; \
            opcodeMapWide16[__opcode] = static_cast<OpcodeID>(__opcode##_wide16); \
            opcodeMapWide32[__opcode] = static_cast<OpcodeID>(__opcode##_wide32);
        #define LLINT_OPCODE_ENTRY(__opcode, length) \
            opcodeMap[__opcode] = __opcode;
#endif
        FOR_EACH_BYTECODE_ID(OPCODE_ENTRY)
        FOR_EACH_CLOOP_BYTECODE_HELPER_ID(LLINT_OPCODE_ENTRY)
        FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
        #undef OPCODE_ENTRY
        #undef LLINT_OPCODE_ENTRY

        // Fill the exception instruction buffer with the throw trampoline so
        // that any PC landing in it dispatches to the exception handler.
        uint8_t* exceptionInstructions = reinterpret_cast<uint8_t*>(LLInt::exceptionInstructions());
        for (unsigned i = 0; i < maxOpcodeLength + 1; ++i)
            exceptionInstructions[i] = llint_throw_from_slow_path_trampoline;

        return JSValue();
    }

    // The emulated registers. CLoopRegister must be exactly pointer-sized so
    // the generated code can treat registers and machine words uniformly.
    static_assert(sizeof(CLoopRegister) == sizeof(intptr_t));
    CLoopRegister t0, t1, t2, t3, t5, sp, cfr, lr, pc;
#if USE(JSVALUE64)
    CLoopRegister numberTag, notCellMask;
#endif
    CLoopRegister pcBase;
    CLoopRegister metadataTable;
    CLoopDoubleRegister d0, d1;

    // RAII guard: restores the CLoop stack pointer on every exit path.
    struct StackPointerScope {
        StackPointerScope(CLoopStack& stack)
            : m_stack(stack)
            , m_originalStackPointer(stack.currentStackPointer())
        { }
        ~StackPointerScope()
        {
            m_stack.setCurrentStackPointer(m_originalStackPointer);
        }
    private:
        CLoopStack& m_stack;
        void* m_originalStackPointer;
    };

    CLoopStack& cloopStack = vm->interpreter->cloopStack();
    StackPointerScope stackPointerScope(cloopStack);

    // Returning to the host is modeled as a return to the
    // llint_return_to_host glue label below.
    lr = getOpcode(llint_return_to_host);
    sp = cloopStack.currentStackPointer();
    cfr = vm->topCallFrame;
#ifndef NDEBUG
    void* startSP = sp.vp();
    CallFrame* startCFR = cfr.callFrame();
#endif

    // Set up the argument registers the entry glue expects.
    t0 = executableAddress;
    t1 = vm;
    t2 = protoCallFrame;
#if USE(JSVALUE64)
    numberTag = JSValue::NumberTag;
    notCellMask = JSValue::NotCellMask;
#endif // USE(JSVALUE64)

    NativeFunction nativeFunc = nullptr;
    JSValue functionReturnValue;
    Opcode opcode = getOpcode(entryOpcodeID);

    // Emulated stack push/pop of one register.
    #define PUSH(cloopReg) \
        do { \
            sp = sp.ip() - 1; \
            *sp.ip() = cloopReg.i(); \
        } while (false)

    #define POP(cloopReg) \
        do { \
            cloopReg = *sp.ip(); \
            sp = sp.ip() + 1; \
        } while (false)

#if ENABLE(OPCODE_STATS)
    #define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
#else
    #define RECORD_OPCODE_STATS(__opcode)
#endif

#if ENABLE(COMPUTED_GOTO_OPCODES)
    // Dispatch mechanism using computed goto statements.
    #define DISPATCH_OPCODE() goto *opcode

    #define DEFINE_OPCODE(__opcode) \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the entry opcode:
    DISPATCH_OPCODE();

#else // !ENABLE(COMPUTED_GOTO_OPCODES)
    // Dispatch mechanism using a C switch statement.
    #define DISPATCH_OPCODE() goto dispatchOpcode

    #define DEFINE_OPCODE(__opcode) \
        case __opcode: \
        __opcode: \
            RECORD_OPCODE_STATS(__opcode);

    // Dispatch to the entry opcode:
    dispatchOpcode:
    switch (static_cast<unsigned>(opcode))
#endif // !ENABLE(COMPUTED_GOTO_OPCODES)

    {
        // The interpreter proper, transliterated from the offline assembler:
        IGNORE_CLANG_WARNINGS_BEGIN("unreachable-code")
#include "LLIntAssembly.h"
        IGNORE_CLANG_WARNINGS_END

        OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
        {
            ASSERT(startSP == sp.vp());
            ASSERT(startCFR == cfr.callFrame());
#if USE(JSVALUE32_64)
            return JSValue(t1.i(), t0.i()); // JSValue(tag, payload)
#else
            return JSValue::decode(t0.encodedJSValue());
#endif
        }

#if !ENABLE(COMPUTED_GOTO_OPCODES)
    default:
        ASSERT(false);
#endif
    } // END bytecode handler cases.

#if ENABLE(COMPUTED_GOTO_OPCODES)
    // Pacify the compiler about generated labels nothing visibly references:
    #define LLINT_OPCODE_ENTRY(__opcode, length) \
        UNUSED_LABEL(__opcode);
        FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
    #undef LLINT_OPCODE_ENTRY
#endif

    #undef DEFINE_OPCODE
    #undef CHECK_FOR_TIMEOUT
    #undef CAST

    return JSValue(); // Unreachable; suppresses a compiler warning.
} // CLoop::execute()
}
#elif !COMPILER(MSVC)
// C Loop is disabled: the interpreter is built from the offline assembler's
// generated native assembly, emitted here as one top-level asm block.
#define OFFLINE_ASM_BEGIN asm (
#define OFFLINE_ASM_END );
// Optionally embed each opcode's numeric ID immediately before its handler
// so tools can identify handlers in the binary.
#if ENABLE(LLINT_EMBEDDED_OPCODE_ID)
#define EMBED_OPCODE_ID_IF_NEEDED(__opcode) ".int " __opcode##_value_string "\n"
#else
#define EMBED_OPCODE_ID_IF_NEEDED(__opcode)
#endif
#define OFFLINE_ASM_OPCODE_LABEL(__opcode) \
EMBED_OPCODE_ID_IF_NEEDED(__opcode) \
OFFLINE_ASM_OPCODE_DEBUG_LABEL(llint_##__opcode) \
OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)
// Global labels need per-CPU assembler directives (e.g. Thumb function
// annotations on ARMv7, alignment on ARM64).
#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
".align 4\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
".thumb\n" \
".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
SYMBOL_STRING(label) ":\n"
#elif CPU(ARM64)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
".align 4\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
SYMBOL_STRING(label) ":\n"
#endif
#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
// On Linux, also emit a plain named label for each opcode to aid debuggers.
#if OS(LINUX)
#define OFFLINE_ASM_OPCODE_DEBUG_LABEL(label) #label ":\n"
#else
#define OFFLINE_ASM_OPCODE_DEBUG_LABEL(label)
#endif
// The generated assembly itself, bracketed by debugger annotation markers.
DEBUGGER_ANNOTATION_MARKER(before_llint_asm)
#include "LLIntAssembly.h"
DEBUGGER_ANNOTATION_MARKER(after_llint_asm)
#endif // ENABLE(C_LOOP)