#ifndef AirArg_h
#define AirArg_h
#if ENABLE(B3_JIT)
#include "AirTmp.h"
#include "B3Common.h"
#include "B3Type.h"
#include <wtf/Optional.h>
#if COMPILER(GCC) && ASSERT_DISABLED
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wreturn-type"
#endif // COMPILER(GCC) && ASSERT_DISABLED
namespace JSC { namespace B3 {
class Value;
namespace Air {
class Special;
class StackSlot;
// Arg is an operand to an Air instruction. It is a union-like value type:
// the active meaning of m_offset / m_base / m_index / m_scale depends on
// m_kind. Args are cheap to copy and compare, and are hashable so they can
// be used as keys in WTF hash tables.
//
// Note on the exhaustive switches below: each one covers every enum value
// and falls through to ASSERT_NOT_REACHED() with no return statement. With
// asserts disabled, GCC's -Wreturn-type is suppressed at the top of this
// file to keep that pattern warning-free.
class Arg {
public:
// Which flavor of operand this is. Determines how the fields are
// interpreted and how the assembler will encode the operand.
enum Kind : int8_t {
// Default-constructed Arg; matches no real operand.
Invalid,
// An architectural register or virtual temporary (see Air::Tmp).
Tmp,
// Immediates. Imm must satisfy isValidImmForm (small, directly
// encodable); BigImm is an arbitrary 64-bit constant; BitImm and
// BitImm64 are immediates for bitwise/logical instructions, which
// have their own encodable ranges (see isValidBitImmForm /
// isValidBitImm64Form, e.g. ARM64 logical-immediate encoding).
Imm,
BigImm,
BitImm,
BitImm64,
// Memory operands: base register + offset (Addr), a stack slot
// (Stack), a slot in the outgoing call-argument area (CallArg), and
// base + index * scale + offset (Index).
Addr,
Stack,
CallArg,
Index,
// Condition codes: integer relational, result-flags, and
// floating-point conditions, respectively.
RelCond,
ResCond,
DoubleCond,
// Escape hatch for instructions with custom semantics (Air::Special).
Special,
// A Width carried as a literal operand.
WidthArg
};
// How an instruction interacts with an operand. "Early" means at the
// start of the instruction's execution, "late" means at the end (late
// uses see early defs as still live). "Cold" uses don't benefit from
// register allocation priority. "Z" defs zero-extend the written value
// to the register's full width.
enum Role : int8_t {
// Early warm use.
Use,
// Early cold use.
ColdUse,
// Late warm use.
LateUse,
// Late cold use.
LateColdUse,
// Late def.
Def,
// Late def that zero-extends to the full register width.
ZDef,
// Early use combined with late def.
UseDef,
// Early use combined with late zero-extending def.
UseZDef,
// Def at the start of execution (interferes with all uses).
EarlyDef,
// Scratch register: behaves like EarlyDef plus LateUse, as the role
// predicates below encode (isAnyUse, isLateUse, isEarlyDef are all
// true for Scratch).
Scratch,
// The instruction consumes the *address* of this Arg rather than its
// value — see forEachTmp, which keeps the arg's own width for the
// base in this case.
UseAddr
};
// Register bank of an operand: general-purpose or floating-point.
enum Type : int8_t {
GP,
FP
};
static const unsigned numTypes = 2;
// Invokes the functor once per register bank.
template<typename Functor>
static void forEachType(const Functor& functor)
{
functor(GP);
functor(FP);
}
// Access width, log2-encoded: bytes(width) == 1 << width.
enum Width : int8_t {
Width8,
Width16,
Width32,
Width64
};
// Width of a pointer on the target.
static Width pointerWidth()
{
if (sizeof(void*) == 8)
return Width64;
return Width32;
}
enum Signedness : int8_t {
Signed,
Unsigned
};
// --- Role predicates. Each is an exhaustive switch so that adding a new
// Role forces every predicate to be revisited. ---
// True if the role reads the value at any point (early or late).
static bool isAnyUse(Role role)
{
switch (role) {
case Use:
case ColdUse:
case UseDef:
case UseZDef:
case LateUse:
case LateColdUse:
case Scratch:
return true;
case Def:
case ZDef:
case UseAddr:
case EarlyDef:
return false;
}
ASSERT_NOT_REACHED();
}
// True if the role is a use that should not boost allocation priority.
static bool isColdUse(Role role)
{
switch (role) {
case ColdUse:
case LateColdUse:
return true;
case Use:
case UseDef:
case UseZDef:
case LateUse:
case Def:
case ZDef:
case UseAddr:
case Scratch:
case EarlyDef:
return false;
}
ASSERT_NOT_REACHED();
}
// A warm use is any use that isn't cold.
static bool isWarmUse(Role role)
{
return isAnyUse(role) && !isColdUse(role);
}
// Returns the cold variant of a warm use; all other roles are returned
// unchanged.
static Role cooled(Role role)
{
switch (role) {
case ColdUse:
case LateColdUse:
case UseDef:
case UseZDef:
case Def:
case ZDef:
case UseAddr:
case Scratch:
case EarlyDef:
return role;
case Use:
return ColdUse;
case LateUse:
return LateColdUse;
}
ASSERT_NOT_REACHED();
}
// True if the role reads the value at the start of execution.
static bool isEarlyUse(Role role)
{
switch (role) {
case Use:
case ColdUse:
case UseDef:
case UseZDef:
return true;
case Def:
case ZDef:
case UseAddr:
case LateUse:
case LateColdUse:
case Scratch:
case EarlyDef:
return false;
}
ASSERT_NOT_REACHED();
}
// True if the role reads the value at the end of execution (Scratch
// counts: its value must stay live through the instruction).
static bool isLateUse(Role role)
{
switch (role) {
case LateUse:
case LateColdUse:
case Scratch:
return true;
case ColdUse:
case Use:
case UseDef:
case UseZDef:
case Def:
case ZDef:
case UseAddr:
case EarlyDef:
return false;
}
ASSERT_NOT_REACHED();
}
// True if the role writes the value at any point.
static bool isAnyDef(Role role)
{
switch (role) {
case Use:
case ColdUse:
case UseAddr:
case LateUse:
case LateColdUse:
return false;
case Def:
case UseDef:
case ZDef:
case UseZDef:
case EarlyDef:
case Scratch:
return true;
}
ASSERT_NOT_REACHED();
}
// True if the role writes the value at the start of execution.
static bool isEarlyDef(Role role)
{
switch (role) {
case Use:
case ColdUse:
case UseAddr:
case LateUse:
case Def:
case UseDef:
case ZDef:
case UseZDef:
case LateColdUse:
return false;
case EarlyDef:
case Scratch:
return true;
}
ASSERT_NOT_REACHED();
}
// True if the role writes the value at the end of execution.
static bool isLateDef(Role role)
{
switch (role) {
case Use:
case ColdUse:
case UseAddr:
case LateUse:
case EarlyDef:
case Scratch:
case LateColdUse:
return false;
case Def:
case UseDef:
case ZDef:
case UseZDef:
return true;
}
ASSERT_NOT_REACHED();
}
// True if the def zero-extends to the full register width.
static bool isZDef(Role role)
{
switch (role) {
case Use:
case ColdUse:
case UseAddr:
case LateUse:
case Def:
case UseDef:
case EarlyDef:
case Scratch:
case LateColdUse:
return false;
case ZDef:
case UseZDef:
return true;
}
ASSERT_NOT_REACHED();
}
// Maps a B3 value type to its register bank. Void is invalid input; the
// GP results after ASSERT_NOT_REACHED() are release-mode fallbacks.
static Type typeForB3Type(B3::Type type)
{
switch (type) {
case Void:
ASSERT_NOT_REACHED();
return GP;
case Int32:
case Int64:
return GP;
case Float:
case Double:
return FP;
}
ASSERT_NOT_REACHED();
return GP;
}
// Maps a B3 value type to its access width.
static Width widthForB3Type(B3::Type type)
{
switch (type) {
case Void:
ASSERT_NOT_REACHED();
return Width8;
case Int32:
case Float:
return Width32;
case Int64:
case Double:
return Width64;
}
ASSERT_NOT_REACHED();
}
// Widest width a value of this bank can occupy on this target.
static Width conservativeWidth(Type type)
{
return type == GP ? pointerWidth() : Width64;
}
// Narrowest width a value of this bank can occupy.
static Width minimumWidth(Type type)
{
return type == GP ? Width8 : Width32;
}
// Width is log2-encoded, so the byte count is a shift.
static unsigned bytes(Width width)
{
return 1 << width;
}
// Smallest Width that covers the given byte count; anything above 4
// bytes (including 5..7) maps to Width64.
static Width widthForBytes(unsigned bytes)
{
switch (bytes) {
case 0:
case 1:
return Width8;
case 2:
return Width16;
case 3:
case 4:
return Width32;
default:
return Width64;
}
}
// Constructs an Invalid Arg.
Arg()
: m_kind(Invalid)
{
}
// Wraps a temporary. The tmp is stored in m_base.
Arg(Air::Tmp tmp)
: m_kind(Tmp)
, m_base(tmp)
{
}
// Wraps an architectural register (via a Tmp).
Arg(Reg reg)
: Arg(Air::Tmp(reg))
{
}
// --- Factory functions. Immediates and offsets live in m_offset. ---
// Small immediate; caller should check isValidImmForm for encodability.
static Arg imm(int64_t value)
{
Arg result;
result.m_kind = Imm;
result.m_offset = value;
return result;
}
// Arbitrary 64-bit immediate (always encodable).
static Arg bigImm(int64_t value)
{
Arg result;
result.m_kind = BigImm;
result.m_offset = value;
return result;
}
// Immediate for 32-bit bitwise instructions.
static Arg bitImm(int64_t value)
{
Arg result;
result.m_kind = BitImm;
result.m_offset = value;
return result;
}
// Immediate for 64-bit bitwise instructions.
static Arg bitImm64(int64_t value)
{
Arg result;
result.m_kind = BitImm64;
result.m_offset = value;
return result;
}
// Pointer constant, materialized as a BigImm.
static Arg immPtr(const void* address)
{
return bigImm(bitwise_cast<intptr_t>(address));
}
// base + offset addressing. The base must be a GP tmp.
static Arg addr(Air::Tmp base, int32_t offset = 0)
{
ASSERT(base.isGP());
Arg result;
result.m_kind = Addr;
result.m_base = base;
result.m_offset = offset;
return result;
}
// Stack-slot reference. Storage trick: the StackSlot* is stashed in
// m_offset and the int32 offset in m_scale — offset() and stackSlot()
// below decode accordingly.
static Arg stack(StackSlot* value, int32_t offset = 0)
{
Arg result;
result.m_kind = Stack;
result.m_offset = bitwise_cast<intptr_t>(value);
result.m_scale = offset; return result;
}
// Offset into the outgoing call-argument area.
static Arg callArg(int32_t offset)
{
Arg result;
result.m_kind = CallArg;
result.m_offset = offset;
return result;
}
// FP-relative address for the given frame offset; falls back to an
// SP-relative form when the FP-relative offset is not encodable at the
// requested width.
static Arg stackAddr(int32_t offsetFromFP, unsigned frameSize, Width width)
{
Arg result = Arg::addr(Air::Tmp(GPRInfo::callFrameRegister), offsetFromFP);
if (!result.isValidForm(width)) {
result = Arg::addr(
Air::Tmp(MacroAssembler::stackPointerRegister),
offsetFromFP + frameSize);
}
return result;
}
// Whether the target supports this index scale. x86 allows 1/2/4/8
// unconditionally; ARM64 allows 1, or the scale matching the access
// width when a width is supplied.
static bool isValidScale(unsigned scale, Optional<Width> width = Nullopt)
{
switch (scale) {
case 1:
if (isX86() || isARM64())
return true;
return false;
case 2:
case 4:
case 8:
if (isX86())
return true;
if (isARM64()) {
if (!width)
return true;
// (scale == 1 is unreachable in these cases; kept for symmetry.)
return scale == 1 || scale == bytes(*width);
}
return false;
default:
return false;
}
}
// log2 of a valid scale (1/2/4/8).
static unsigned logScale(unsigned scale)
{
switch (scale) {
case 1:
return 0;
case 2:
return 1;
case 4:
return 2;
case 8:
return 3;
default:
ASSERT_NOT_REACHED();
return 0;
}
}
// base + index * scale + offset addressing. Both tmps must be GP.
static Arg index(Air::Tmp base, Air::Tmp index, unsigned scale = 1, int32_t offset = 0)
{
ASSERT(base.isGP());
ASSERT(index.isGP());
ASSERT(isValidScale(scale));
Arg result;
result.m_kind = Index;
result.m_base = base;
result.m_index = index;
result.m_scale = static_cast<int32_t>(scale);
result.m_offset = offset;
return result;
}
// Condition-code operands: the condition value is stored in m_offset.
static Arg relCond(MacroAssembler::RelationalCondition condition)
{
Arg result;
result.m_kind = RelCond;
result.m_offset = condition;
return result;
}
static Arg resCond(MacroAssembler::ResultCondition condition)
{
Arg result;
result.m_kind = ResCond;
result.m_offset = condition;
return result;
}
static Arg doubleCond(MacroAssembler::DoubleCondition condition)
{
Arg result;
result.m_kind = DoubleCond;
result.m_offset = condition;
return result;
}
// Special operand: the Air::Special* is stored in m_offset.
static Arg special(Air::Special* special)
{
Arg result;
result.m_kind = Special;
result.m_offset = bitwise_cast<intptr_t>(special);
return result;
}
// Width literal operand: the Width enum value is stored in m_offset.
static Arg widthArg(Width width)
{
Arg result;
result.m_kind = WidthArg;
result.m_offset = width;
return result;
}
// Args compare field-wise; unused fields keep their defaults, so this
// is well-defined across kinds.
bool operator==(const Arg& other) const
{
return m_offset == other.m_offset
&& m_kind == other.m_kind
&& m_base == other.m_base
&& m_index == other.m_index
&& m_scale == other.m_scale;
}
bool operator!=(const Arg& other) const
{
return !(*this == other);
}
// True for anything other than a default-constructed (Invalid) Arg.
explicit operator bool() const { return *this != Arg(); }
Kind kind() const
{
return m_kind;
}
// --- Kind predicates. ---
bool isTmp() const
{
return kind() == Tmp;
}
bool isImm() const
{
return kind() == Imm;
}
bool isBigImm() const
{
return kind() == BigImm;
}
bool isBitImm() const
{
return kind() == BitImm;
}
bool isBitImm64() const
{
return kind() == BitImm64;
}
// True for any of the four immediate kinds.
bool isSomeImm() const
{
switch (kind()) {
case Imm:
case BigImm:
case BitImm:
case BitImm64:
return true;
default:
return false;
}
}
bool isAddr() const
{
return kind() == Addr;
}
bool isStack() const
{
return kind() == Stack;
}
bool isCallArg() const
{
return kind() == CallArg;
}
bool isIndex() const
{
return kind() == Index;
}
// True for any memory-operand kind.
bool isMemory() const
{
switch (kind()) {
case Addr:
case Stack:
case CallArg:
case Index:
return true;
default:
return false;
}
}
bool isStackMemory() const;
bool isRelCond() const
{
return kind() == RelCond;
}
bool isResCond() const
{
return kind() == ResCond;
}
bool isDoubleCond() const
{
return kind() == DoubleCond;
}
// True for any condition-code kind.
bool isCondition() const
{
switch (kind()) {
case RelCond:
case ResCond:
case DoubleCond:
return true;
default:
return false;
}
}
bool isSpecial() const
{
return kind() == Special;
}
bool isWidthArg() const
{
return kind() == WidthArg;
}
// True if this Arg names storage that liveness analysis tracks.
bool isAlive() const
{
return isTmp() || isStack();
}
// The wrapped temporary. Valid only for Tmp kind.
Air::Tmp tmp() const
{
ASSERT(kind() == Tmp);
return m_base;
}
// The immediate's value. Valid only for immediate kinds.
int64_t value() const
{
ASSERT(isSomeImm());
return m_offset;
}
// Whether the immediate fits in T without loss.
template<typename T>
bool isRepresentableAs() const
{
return B3::isRepresentableAs<T>(value());
}
bool isRepresentableAs(Width, Signedness) const;
// The immediate's value cast to T.
template<typename T>
T asNumber() const
{
return static_cast<T>(value());
}
// The immediate reinterpreted as a pointer. Valid only for BigImm.
void* pointerValue() const
{
ASSERT(kind() == BigImm);
return bitwise_cast<void*>(static_cast<intptr_t>(m_offset));
}
// Base register of a memory operand (Addr or Index).
Air::Tmp base() const
{
ASSERT(kind() == Addr || kind() == Index);
return m_base;
}
bool hasOffset() const { return isMemory(); }
// The memory operand's offset. Stack args keep their offset in m_scale
// (m_offset holds the StackSlot*); all other memory kinds use m_offset.
int32_t offset() const
{
if (kind() == Stack)
return static_cast<int32_t>(m_scale);
ASSERT(kind() == Addr || kind() == CallArg || kind() == Index);
return static_cast<int32_t>(m_offset);
}
// The referenced stack slot (decoded from m_offset).
StackSlot* stackSlot() const
{
ASSERT(kind() == Stack);
return bitwise_cast<StackSlot*>(m_offset);
}
// Index register of an Index operand.
Air::Tmp index() const
{
ASSERT(kind() == Index);
return m_index;
}
// Scale factor of an Index operand (1/2/4/8).
unsigned scale() const
{
ASSERT(kind() == Index);
return m_scale;
}
unsigned logScale() const
{
return logScale(scale());
}
// The referenced Special (decoded from m_offset).
Air::Special* special() const
{
ASSERT(kind() == Special);
return bitwise_cast<Air::Special*>(m_offset);
}
// The Width carried by a WidthArg.
Width width() const
{
ASSERT(kind() == WidthArg);
return static_cast<Width>(m_offset);
}
bool isGPTmp() const
{
return isTmp() && tmp().isGP();
}
bool isFPTmp() const
{
return isTmp() && tmp().isFP();
}
// Whether this Arg may appear where a GP operand is expected. Most
// kinds qualify; Tmps are checked by bank.
bool isGP() const
{
switch (kind()) {
case Imm:
case BigImm:
case BitImm:
case BitImm64:
case Addr:
case Index:
case Stack:
case CallArg:
case RelCond:
case ResCond:
case DoubleCond:
case Special:
case WidthArg:
return true;
case Tmp:
return isGPTmp();
case Invalid:
return false;
}
ASSERT_NOT_REACHED();
}
// Whether this Arg may appear where an FP operand is expected. Memory
// operands qualify, and BigImm is allowed as a double immediate.
bool isFP() const
{
switch (kind()) {
case Imm:
case BitImm:
case BitImm64:
case RelCond:
case ResCond:
case DoubleCond:
case Special:
case WidthArg:
case Invalid:
return false;
case Addr:
case Index:
case Stack:
case CallArg:
case BigImm: return true;
case Tmp:
return isFPTmp();
}
ASSERT_NOT_REACHED();
}
// True when isGP() and isFP() are mutually exclusive for this kind, so
// type() below is unambiguous.
bool hasType() const
{
switch (kind()) {
case Imm:
case BitImm:
case BitImm64:
case Special:
case Tmp:
return true;
default:
return false;
}
}
// The operand's bank. Only meaningful when hasType() is true; note the
// GP-biased tie-break for kinds that are both.
Type type() const
{
return isGP() ? GP : FP;
}
// Whether this Arg may appear where an operand of the given bank is
// expected (not exclusive: some kinds satisfy both).
bool isType(Type type) const
{
switch (type) {
case GP:
return isGP();
case FP:
return isFP();
}
ASSERT_NOT_REACHED();
}
bool canRepresent(Value* value) const;
bool isCompatibleType(const Arg& other) const;
// --- Register accessors: valid only when the Arg is a Tmp of the
// appropriate flavor. ---
bool isGPR() const
{
return isTmp() && tmp().isGPR();
}
GPRReg gpr() const
{
return tmp().gpr();
}
bool isFPR() const
{
return isTmp() && tmp().isFPR();
}
FPRReg fpr() const
{
return tmp().fpr();
}
bool isReg() const
{
return isTmp() && tmp().isReg();
}
Reg reg() const
{
return tmp().reg();
}
unsigned gpTmpIndex() const
{
return tmp().gpTmpIndex();
}
unsigned fpTmpIndex() const
{
return tmp().fpTmpIndex();
}
unsigned tmpIndex() const
{
return tmp().tmpIndex();
}
// --- Target-specific encodability checks. ---
// Whether a plain (non-bitwise) immediate of this value is encodable.
static bool isValidImmForm(int64_t value)
{
if (isX86())
return B3::isRepresentableAs<int32_t>(value);
if (isARM64())
return isUInt12(value);
return false;
}
// Whether a 32-bit bitwise-op immediate of this value is encodable.
static bool isValidBitImmForm(int64_t value)
{
if (isX86())
return B3::isRepresentableAs<int32_t>(value);
if (isARM64())
return ARM64LogicalImmediate::create32(value).isValid();
return false;
}
// Whether a 64-bit bitwise-op immediate of this value is encodable.
static bool isValidBitImm64Form(int64_t value)
{
if (isX86())
return B3::isRepresentableAs<int32_t>(value);
if (isARM64())
return ARM64LogicalImmediate::create64(value).isValid();
return false;
}
// Whether base+offset addressing with this offset is encodable. ARM64
// accepts a signed 9-bit offset, or an unsigned 12-bit offset scaled by
// the access width.
static bool isValidAddrForm(int32_t offset, Optional<Width> width = Nullopt)
{
if (isX86())
return true;
if (isARM64()) {
if (!width)
return true;
if (isValidSignedImm9(offset))
return true;
switch (*width) {
case Width8:
return isValidScaledUImm12<8>(offset);
case Width16:
return isValidScaledUImm12<16>(offset);
case Width32:
return isValidScaledUImm12<32>(offset);
case Width64:
return isValidScaledUImm12<64>(offset);
}
}
return false;
}
// Whether scaled-index addressing with this scale/offset is encodable.
// ARM64 has no displacement in its indexed form, hence !offset.
static bool isValidIndexForm(unsigned scale, int32_t offset, Optional<Width> width = Nullopt)
{
if (!isValidScale(scale, width))
return false;
if (isX86())
return true;
if (isARM64())
return !offset;
return false;
}
// Whether this Arg can be encoded as-is on the target, optionally for a
// memory access of the given width.
bool isValidForm(Optional<Width> width = Nullopt) const
{
switch (kind()) {
case Invalid:
return false;
case Tmp:
return true;
case Imm:
return isValidImmForm(value());
case BigImm:
return true;
case BitImm:
return isValidBitImmForm(value());
case BitImm64:
return isValidBitImm64Form(value());
case Addr:
case Stack:
case CallArg:
return isValidAddrForm(offset(), width);
case Index:
return isValidIndexForm(scale(), offset(), width);
case RelCond:
case ResCond:
case DoubleCond:
case Special:
case WidthArg:
return true;
}
ASSERT_NOT_REACHED();
}
// Invokes the functor on every Tmp this Arg mentions, without role
// information. Only Tmp/Addr/Index kinds contain tmps.
template<typename Functor>
void forEachTmpFast(const Functor& functor)
{
switch (m_kind) {
case Tmp:
case Addr:
functor(m_base);
break;
case Index:
functor(m_base);
functor(m_index);
break;
default:
break;
}
}
bool usesTmp(Air::Tmp tmp) const;
template<typename Thing>
bool is() const;
template<typename Thing>
Thing as() const;
template<typename Thing, typename Functor>
void forEachFast(const Functor&);
template<typename Thing, typename Functor>
void forEach(Role, Type, Width, const Functor&);
// Invokes the functor on every Tmp this Arg mentions, with role/type/
// width. A Tmp inherits the instruction operand's role; the registers
// inside a memory operand are always GP Uses at pointer width — except
// under UseAddr, where the operand's own width applies.
template<typename Functor>
void forEachTmp(Role argRole, Type argType, Width argWidth, const Functor& functor)
{
switch (m_kind) {
case Tmp:
ASSERT(isAnyUse(argRole) || isAnyDef(argRole));
functor(m_base, argRole, argType, argWidth);
break;
case Addr:
functor(m_base, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
break;
case Index:
functor(m_base, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
functor(m_index, Use, GP, argRole == UseAddr ? argWidth : pointerWidth());
break;
default:
break;
}
}
// --- Conversions to MacroAssembler operand types. Each asserts the
// appropriate kind. ---
MacroAssembler::TrustedImm32 asTrustedImm32() const
{
ASSERT(isImm() || isBitImm());
return MacroAssembler::TrustedImm32(static_cast<int32_t>(m_offset));
}
#if USE(JSVALUE64)
MacroAssembler::TrustedImm64 asTrustedImm64() const
{
ASSERT(isBigImm() || isBitImm64());
return MacroAssembler::TrustedImm64(value());
}
#endif
// Pointer-sized immediates are BigImm on 64-bit targets, Imm on 32-bit.
MacroAssembler::TrustedImmPtr asTrustedImmPtr() const
{
if (is64Bit())
ASSERT(isBigImm());
else
ASSERT(isImm());
return MacroAssembler::TrustedImmPtr(pointerValue());
}
MacroAssembler::Address asAddress() const
{
ASSERT(isAddr());
return MacroAssembler::Address(m_base.gpr(), static_cast<int32_t>(m_offset));
}
MacroAssembler::BaseIndex asBaseIndex() const
{
ASSERT(isIndex());
return MacroAssembler::BaseIndex(
m_base.gpr(), m_index.gpr(), static_cast<MacroAssembler::Scale>(logScale()),
static_cast<int32_t>(m_offset));
}
MacroAssembler::RelationalCondition asRelationalCondition() const
{
ASSERT(isRelCond());
return static_cast<MacroAssembler::RelationalCondition>(m_offset);
}
MacroAssembler::ResultCondition asResultCondition() const
{
ASSERT(isResCond());
return static_cast<MacroAssembler::ResultCondition>(m_offset);
}
MacroAssembler::DoubleCondition asDoubleCondition() const
{
ASSERT(isDoubleCond());
return static_cast<MacroAssembler::DoubleCondition>(m_offset);
}
// Whether this condition operand has a well-defined inverse (some
// result conditions do not).
bool isInvertible() const
{
switch (kind()) {
case RelCond:
case DoubleCond:
return true;
case ResCond:
return MacroAssembler::isInvertible(asResultCondition());
default:
return false;
}
}
// Returns the inverted condition, or *this when inverted == false.
Arg inverted(bool inverted = true) const
{
if (!inverted)
return *this;
switch (kind()) {
case RelCond:
return relCond(MacroAssembler::invert(asRelationalCondition()));
case ResCond:
return resCond(MacroAssembler::invert(asResultCondition()));
case DoubleCond:
return doubleCond(MacroAssembler::invert(asDoubleCondition()));
default:
RELEASE_ASSERT_NOT_REACHED();
return Arg();
}
}
// Returns the flipped relational condition (operands swapped).
// NOTE(review): when flipped == false this returns an *invalid* Arg,
// unlike inverted(false) which returns *this — confirm callers rely on
// this asymmetry before changing it.
Arg flipped(bool flipped = true) const
{
if (!flipped)
return Arg();
return relCond(MacroAssembler::flip(asRelationalCondition()));
}
bool isSignedCond() const
{
return isRelCond() && MacroAssembler::isSigned(asRelationalCondition());
}
bool isUnsignedCond() const
{
return isRelCond() && MacroAssembler::isUnsigned(asRelationalCondition());
}
unsigned jsHash() const;
void dump(PrintStream&) const;
// Hash-table support: the deleted value is distinguished by its m_base.
Arg(WTF::HashTableDeletedValueType)
: m_base(WTF::HashTableDeletedValue)
{
}
bool isHashTableDeletedValue() const
{
return *this == Arg(WTF::HashTableDeletedValue);
}
// Combines all fields so equal Args hash equally.
unsigned hash() const
{
return WTF::IntHash<int64_t>::hash(m_offset) + m_kind + m_scale + m_base.hash() +
m_index.hash();
}
private:
// m_offset is multi-purpose storage: immediate value, memory offset,
// StackSlot* (Stack kind), Special* (Special kind), condition code, or
// Width, depending on m_kind. For Stack, m_scale holds the offset.
int64_t m_offset { 0 };
Kind m_kind { Invalid };
int32_t m_scale { 1 };
Air::Tmp m_base;
Air::Tmp m_index;
};
struct ArgHash {
static unsigned hash(const Arg& key) { return key.hash(); }
static bool equal(const Arg& a, const Arg& b) { return a == b; }
static const bool safeToCompareToEmptyOrDeleted = true;
};
} } }
namespace WTF {
// Dump support for Arg's enums (used by PrintStream-based logging).
void printInternal(PrintStream&, JSC::B3::Air::Arg::Kind);
void printInternal(PrintStream&, JSC::B3::Air::Arg::Role);
void printInternal(PrintStream&, JSC::B3::Air::Arg::Type);
void printInternal(PrintStream&, JSC::B3::Air::Arg::Width);
void printInternal(PrintStream&, JSC::B3::Air::Arg::Signedness);
// Route hash-table use of Arg through ArgHash.
template<typename T> struct DefaultHash;
template<> struct DefaultHash<JSC::B3::Air::Arg> {
typedef JSC::B3::Air::ArgHash Hash;
};
template<typename T> struct HashTraits;
template<> struct HashTraits<JSC::B3::Air::Arg> : SimpleClassHashTraits<JSC::B3::Air::Arg> {
// An empty (default-constructed) Arg is not all-zero bits — m_scale
// defaults to 1 — so the table must construct empties, not memset them.
static const bool emptyValueIsZero = false;
};
}
#if COMPILER(GCC) && ASSERT_DISABLED
#pragma GCC diagnostic pop
#endif // COMPILER(GCC) && ASSERT_DISABLED
#endif // ENABLE(B3_JIT)
#endif // AirArg_h