// LowLevelInterpreter.cpp
#include "config.h"
#include "LowLevelInterpreter.h"
#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>
#if !ENABLE(JIT)
#include "CodeBlock.h"
#include "CommonSlowPaths.h"
#include "JSCInlines.h"
#include "LLIntCLoop.h"
#include "LLIntSlowPaths.h"
#include "VMInspector.h"
#include <cstring>
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>
using namespace JSC::LLInt;
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END
#if ENABLE(OPCODE_TRACING)
#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
#else
#define TRACE_OPCODE(opcode)
#endif
#define USE_LABEL(label) \
do { \
if (false) \
goto label; \
} while (false)
#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);
#define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)
#if ENABLE(COMPUTED_GOTO_OPCODES)
#define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
#else
#define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
#endif
#define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label);
namespace JSC {
namespace LLInt {
#if USE(JSVALUE32_64)
static double Ints2Double(uint32_t lo, uint32_t hi)
{
union {
double dval;
uint64_t ival64;
} u;
u.ival64 = (static_cast<uint64_t>(hi) << 32) | lo;
return u.dval;
}
static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
{
union {
double dval;
uint64_t ival64;
} u;
u.dval = val;
hi = static_cast<uint32_t>(u.ival64 >> 32);
lo = static_cast<uint32_t>(u.ival64);
}
#endif // USE(JSVALUE32_64)
}
struct CLoopRegister {
CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); }
union {
intptr_t i;
uintptr_t u;
#if USE(JSVALUE64)
#if CPU(BIG_ENDIAN)
struct {
int32_t i32padding;
int32_t i32;
};
struct {
uint32_t u32padding;
uint32_t u32;
};
struct {
int8_t i8padding[7];
int8_t i8;
};
struct {
uint8_t u8padding[7];
uint8_t u8;
};
#else // !CPU(BIG_ENDIAN)
struct {
int32_t i32;
int32_t i32padding;
};
struct {
uint32_t u32;
uint32_t u32padding;
};
struct {
int8_t i8;
int8_t i8padding[7];
};
struct {
uint8_t u8;
uint8_t u8padding[7];
};
#endif // !CPU(BIG_ENDIAN)
#else // !USE(JSVALUE64)
int32_t i32;
uint32_t u32;
#if CPU(BIG_ENDIAN)
struct {
int8_t i8padding[3];
int8_t i8;
};
struct {
uint8_t u8padding[3];
uint8_t u8;
};
#else // !CPU(BIG_ENDIAN)
struct {
int8_t i8;
int8_t i8padding[3];
};
struct {
uint8_t u8;
uint8_t u8padding[3];
};
#endif // !CPU(BIG_ENDIAN)
#endif // !USE(JSVALUE64)
intptr_t* ip;
int8_t* i8p;
void* vp;
CallFrame* callFrame;
ExecState* execState;
void* instruction;
VM* vm;
JSCell* cell;
ProtoCallFrame* protoCallFrame;
NativeFunction nativeFunc;
#if USE(JSVALUE64)
int64_t i64;
uint64_t u64;
EncodedJSValue encodedJSValue;
double castToDouble;
#endif
Opcode opcode;
};
operator ExecState*() { return execState; }
operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }
operator VM*() { return vm; }
operator ProtoCallFrame*() { return protoCallFrame; }
operator Register*() { return reinterpret_cast<Register*>(vp); }
operator JSCell*() { return cell; }
#if USE(JSVALUE64)
inline void clearHighWord() { i32padding = 0; }
#else
inline void clearHighWord() { }
#endif
};
JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
{
#define CAST reinterpret_cast
#define SIGN_BIT32(x) ((x) & 0x80000000)
if (UNLIKELY(isInitializationPass)) {
#if ENABLE(COMPUTED_GOTO_OPCODES)
Opcode* opcodeMap = LLInt::opcodeMap();
#define OPCODE_ENTRY(__opcode, length) \
opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
FOR_EACH_OPCODE_ID(OPCODE_ENTRY)
#undef OPCODE_ENTRY
#define LLINT_OPCODE_ENTRY(__opcode, length) \
opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode);
FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY)
#undef LLINT_OPCODE_ENTRY
#endif
Instruction* exceptionInstructions = LLInt::exceptionInstructions();
for (int i = 0; i < maxOpcodeLength + 1; ++i)
exceptionInstructions[i].u.pointer =
LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
return JSValue();
}
ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
union CLoopDoubleRegister {
double d;
#if USE(JSVALUE64)
int64_t castToInt64;
#endif
};
CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc;
#if USE(JSVALUE64)
CLoopRegister pcBase, tagTypeNumber, tagMask;
#endif
CLoopDoubleRegister d0, d1;
lr.opcode = getOpcode(llint_return_to_host);
sp.vp = vm->interpreter->stack().topOfStack() + 1;
cfr.callFrame = vm->topCallFrame;
#ifndef NDEBUG
void* startSP = sp.vp;
CallFrame* startCFR = cfr.callFrame;
#endif
t0.vp = executableAddress;
t1.vm = vm;
t2.protoCallFrame = protoCallFrame;
#if USE(JSVALUE64)
tagTypeNumber.i = 0xFFFF000000000000;
tagMask.i = 0xFFFF000000000002;
#endif // USE(JSVALUE64)
NativeFunction nativeFunc = 0;
JSValue functionReturnValue;
Opcode opcode = getOpcode(entryOpcodeID);
#define PUSH(cloopReg) \
do { \
sp.ip--; \
*sp.ip = cloopReg.i; \
} while (false)
#define POP(cloopReg) \
do { \
cloopReg.i = *sp.ip; \
sp.ip++; \
} while (false)
#if ENABLE(OPCODE_STATS)
#define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
#else
#define RECORD_OPCODE_STATS(__opcode)
#endif
#if USE(JSVALUE32_64)
#define FETCH_OPCODE() pc.opcode
#else // USE(JSVALUE64)
#define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
#endif // USE(JSVALUE64)
#define NEXT_INSTRUCTION() \
do { \
opcode = FETCH_OPCODE(); \
DISPATCH_OPCODE(); \
} while (false)
#if ENABLE(COMPUTED_GOTO_OPCODES)
#define DISPATCH_OPCODE() goto *opcode
#define DEFINE_OPCODE(__opcode) \
__opcode: \
RECORD_OPCODE_STATS(__opcode);
DISPATCH_OPCODE();
#else // !ENABLE(COMPUTED_GOTO_OPCODES)
#define DISPATCH_OPCODE() goto dispatchOpcode
#define DEFINE_OPCODE(__opcode) \
case __opcode: \
__opcode: \
RECORD_OPCODE_STATS(__opcode);
dispatchOpcode:
switch (opcode)
#endif // !ENABLE(COMPUTED_GOTO_OPCODES)
{
#include "LLIntAssembly.h"
OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
{
ASSERT(startSP == sp.vp);
ASSERT(startCFR == cfr.callFrame);
#if USE(JSVALUE32_64)
return JSValue(t1.i, t0.i); #else
return JSValue::decode(t0.encodedJSValue);
#endif
}
OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
{
JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
t1.i = result.tag();
t0.i = result.payload();
#else
t0.encodedJSValue = JSValue::encode(result);
#endif
opcode = lr.opcode;
DISPATCH_OPCODE();
}
#if !ENABLE(COMPUTED_GOTO_OPCODES)
default:
ASSERT(false);
#endif
}
#if ENABLE(COMPUTED_GOTO_OPCODES)
#define LLINT_OPCODE_ENTRY(__opcode, length) \
UNUSED_LABEL(__opcode);
FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
#undef LLINT_OPCODE_ENTRY
#endif
#undef NEXT_INSTRUCTION
#undef DEFINE_OPCODE
#undef CHECK_FOR_TIMEOUT
#undef CAST
#undef SIGN_BIT32
return JSValue(); }
}
#elif !OS(WINDOWS)
#define OFFLINE_ASM_BEGIN asm (
#define OFFLINE_ASM_END );
#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)
#if CPU(ARM_THUMB2)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
".align 4\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
".thumb\n" \
".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
SYMBOL_STRING(label) ":\n"
#elif CPU(ARM64)
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
".align 4\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
SYMBOL_STRING(label) ":\n"
#else
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
".text\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
SYMBOL_STRING(label) ":\n"
#endif
#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
#include "LLIntAssembly.h"
#endif // ENABLE(JIT)