//===-- ARM64ISelLowering.h - ARM64 DAG Lowering Interface ------*- C++ -*-===//
#ifndef LLVM_TARGET_ARM64_ISELLOWERING_H
#define LLVM_TARGET_ARM64_ISELLOWERING_H
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Target/TargetLowering.h"
namespace llvm {
namespace ARM64ISD {
// ARM64-specific SelectionDAG node opcodes.  Naming the enum `NodeType`
// (the LLVM convention, cf. ISD::NodeType) lets code refer to the opcode
// type explicitly; all existing `ARM64ISD::Foo` uses are unaffected.
enum NodeType {
  // Target nodes are numbered after the generic opcodes.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge,  // Full address-materialization sequence, large code model
  CALL,          // Function call
  TLSDESC_CALL,  // Call used during TLS-descriptor resolution
  ADRP, ADDlow,  // Page address (adrp) / add of the low 12 address bits
  LOADgot,       // Load a symbol's address from the GOT
  RET_FLAG,      // Return with a glue operand
  BRCOND,        // Conditional branch on flags
  CSEL, FCSEL,   // Conditional select (integer / floating point)
  CSINV, CSNEG, CSINC,  // Conditional select-invert/-negate/-increment
  THREAD_POINTER,
  ADC, SBC,      // Add/subtract with carry
  ADDS, SUBS, ADCS, SBCS, ANDS,  // Arithmetic/logic that also set flags
  FCMP,          // Floating-point compare, sets flags
  FMAX, FMIN,    // Floating-point min/max
  EXTR,          // Bitfield extract from a register pair
  // Vector duplication of a scalar / of a specific lane.
  DUP, DUPLANE8, DUPLANE16, DUPLANE32, DUPLANE64,
  // Vector immediate moves (plain, shifted, "editable", shifted-mask forms).
  MOVI, MOVIshift, MOVIedit, MOVImsl,
  FMOV,          // Floating-point immediate move
  MVNIshift, MVNImsl,  // Inverted vector immediate moves
  BICi, ORRi,    // Vector bit clear / bitwise OR with immediate
  NEG,           // Vector negate
  // Vector interleave/de-interleave/transpose permutes.
  ZIP1, ZIP2, UZP1, UZP2, TRN1, TRN2,
  REV16, REV32, REV64,  // Vector element reversal within 16/32/64-bit chunks
  EXT,           // Vector extract-and-concatenate
  VSHL, VLSHR, VASHR,   // Vector shift left / logical right / arithmetic right
  // Vector integer comparisons (register-register and against zero).
  CMEQ, CMGE, CMGT, CMHI, CMHS,
  FCMEQ, FCMGE, FCMGT,  // Vector floating-point comparisons
  CMEQz, CMGEz, CMGTz, CMLEz, CMLTz,
  FCMEQz, FCMGEz, FCMGTz, FCMLEz, FCMLTz,
  NOT,           // Vector bitwise NOT
  BIT,           // Vector bitwise insert-if-true
  // Compare-and-branch / test-bit-and-branch.
  CBZ, CBNZ, TBZ, TBNZ,
  TC_RETURN,     // Tail call
  PREFETCH,
  SITOF,         // Signed int-to-float conversion (FP register operand)
  UITOF          // Unsigned int-to-float conversion (FP register operand)
};
}
class ARM64Subtarget;
class ARM64TargetMachine;
/// ARM64TargetLowering - ARM64-specific implementation of the TargetLowering
/// interface.  Declares the custom SelectionDAG lowering, DAG-combine,
/// legality-query, and custom-inserter hooks for this backend; definitions
/// live in the corresponding .cpp file.
class ARM64TargetLowering : public TargetLowering {
// When true, unaligned memory accesses are rejected outright; see
// allowsUnalignedMemoryAccesses below.
bool RequireStrictAlign;
public:
explicit ARM64TargetLowering(ARM64TargetMachine &TM);
/// Select the CCAssignFn used to analyze a call with the given calling
/// convention and variadic-ness.
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
/// Compute which bits of Op are known zero/one for target-specific nodes.
void computeMaskedBitsForTargetNode(const SDValue Op, APInt &KnownZero,
APInt &KnownOne, const SelectionDAG &DAG,
unsigned Depth = 0) const;
virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;
/// Unaligned accesses are allowed (and reported as fast) unless strict
/// alignment was requested via RequireStrictAlign.
virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast = 0) const {
if (RequireStrictAlign)
return false;
if (Fast)
*Fast = true;
return true;
}
/// Entry point for operations marked Custom during legalization.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
/// Printable name for an ARM64ISD opcode (used in DAG dumps).
virtual const char *getTargetNodeName(unsigned Opcode) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
virtual unsigned getFunctionAlignment(const Function *F) const;
virtual unsigned getMaximalGlobalOffset() const;
/// Create this target's FastISel instance.
virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const;
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
// Custom-inserter helpers for atomic pseudo-instructions (8/16/32/64-bit
// and 128-bit variants) and f128 conditional select; presumably these
// expand into new basic-block control flow — see the definitions.
MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned Size,
unsigned BinOpcode) const;
MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned Size) const;
MachineBasicBlock *EmitAtomicBinary128(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned BinOpcodeLo,
unsigned BinOpcodeHi) const;
MachineBasicBlock *EmitAtomicCmpSwap128(MachineInstr *MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *EmitAtomicMinMax128(MachineInstr *MI,
MachineBasicBlock *BB,
unsigned CondCode) const;
MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
MachineBasicBlock *BB) const;
/// Dispatches pseudo-instructions to the Emit* helpers above.
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &I,
unsigned Intrinsic) const;
// Free-conversion queries used by generic optimizations.
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
virtual bool isZExtFree(EVT VT1, EVT VT2) const;
virtual bool isZExtFree(SDValue Val, EVT VT2) const;
// NOTE(review): "RequiredAligment" is misspelled (RequiredAlignment).
// Parameter names in declarations are ABI-neutral, but fix it together
// with the out-of-line definitions to keep them in sync.
virtual bool hasPairedLoad(Type *LoadedType,
unsigned &RequiredAligment) const;
virtual bool hasPairedLoad(EVT LoadedType,
unsigned &RequiredAligment) const;
virtual bool isLegalAddImmediate(int64_t) const;
virtual bool isLegalICmpImmediate(int64_t) const;
virtual EVT getOptimalMemOpType(uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const;
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const;
virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const;
private:
// Subtarget the lowering is configured for.
const ARM64Subtarget *Subtarget;
// Register vector value types and their operation actions for NEON.
void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
void addDRTypeForNEON(MVT VT);
void addQRTypeForNEON(MVT VT);
// Calling-convention lowering: arguments, calls, returns, varargs.
virtual SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(CallLoweringInfo &,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc DL, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals,
bool isThisReturn, SDValue ThisVal) const;
/// Decide whether a call may be turned into a tail call.
bool isEligibleForTailCallOptimization(SDValue Callee,
CallingConv::ID CalleeCC,
bool isVarArg,
bool isCalleeStructRet,
bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
SDLoc DL, SDValue &Chain) const;
virtual bool CanLowerReturn(CallingConv::ID CallConv,
MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const;
virtual SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc DL, SelectionDAG &DAG) const;
// Per-node custom lowering helpers (presumably dispatched from
// LowerOperation — confirm against the .cpp).
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerELFTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
/// Lower an f128 operation to a runtime-library call.
SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
RTLIB::Libcall Call) const;
SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
// Inline-assembly constraint support.
ConstraintType getConstraintType(const std::string &Constraint) const;
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const;
// Tail-call / return-folding queries.
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
bool mayBeEmittedAsTailCall(CallInst *CI) const;
// Pre-/post-indexed load-store address matching.
bool getIndexedAddressParts(SDNode *Op,
SDValue &Base,
SDValue &Offset,
ISD::MemIndexedMode &AM,
bool &IsInc,
SelectionDAG &DAG) const;
bool getPreIndexedAddressParts(SDNode *N,
SDValue &Base,
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const;
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
SDValue &Base,
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const;
/// Replace results of N with custom-lowered values during type
/// legalization.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const;
};
namespace ARM64 {
/// Factory for the ARM64 fast instruction selector.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
}
}
#endif // LLVM_TARGET_ARM64_ISELLOWERING_H