#include "llvm/ValueSymbolTable.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/System/Host.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm-abi.h"
#include "llvm-internal.h"
#include "llvm-debug.h"
extern "C" {
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tm_p.h"
#include "tree.h"
#include "c-tree.h" // FIXME: eliminate.
#include "tree-iterator.h"
#include "output.h"
#include "diagnostic.h"
#include "real.h"
#include "langhooks.h"
#include "function.h"
#include "toplev.h"
#include "flags.h"
#include "target.h"
#include "hard-reg-set.h"
#include "except.h"
#include "rtl.h"
#include "libfuncs.h"
#include "tree-flow.h"
#include "tree-gimple.h"
extern int get_pointer_alignment (tree exp, unsigned int max_align);
extern enum machine_mode reg_raw_mode[FIRST_PSEUDO_REGISTER];
}
static LLVMContext &Context = getGlobalContext();
extern std::vector<tree> Thunks;
#define BOGUS_CTOR(exp) \
(DECL_INITIAL(exp) && \
TREE_CODE(DECL_INITIAL(exp)) == CONSTRUCTOR && \
!TREE_TYPE(DECL_INITIAL(exp)))
/// isGimpleTemporary - Return true if this is a gimple temporary that can be
/// held in an LLVM SSA register rather than needing stack storage: it must be
/// a formal gimple temporary register and must not be of aggregate type.
static bool isGimpleTemporary(tree decl) {
  if (!is_gimple_formal_tmp_reg(decl))
    return false;
  return !isAggregateTreeType(TREE_TYPE(decl));
}
/// getINTEGER_CSTVal - Return the value of an INTEGER_CST node as a uint64_t.
/// On a 64-bit host the low word already holds the whole value; on a 32-bit
/// host the value is reassembled from the high and low 32-bit words.
uint64_t getINTEGER_CSTVal(tree exp) {
  unsigned HOST_WIDE_INT Hi = (unsigned HOST_WIDE_INT)TREE_INT_CST_HIGH(exp);
  unsigned HOST_WIDE_INT Lo = (unsigned HOST_WIDE_INT)TREE_INT_CST_LOW(exp);
  if (HOST_BITS_PER_WIDE_INT == 64)
    return (uint64_t)Lo;
  assert(HOST_BITS_PER_WIDE_INT == 32 &&
         "Only 32- and 64-bit hosts supported!");
  return ((uint64_t)Hi << 32) | (uint64_t)Lo;
}
/// isInt64 - Return true if t is an INTEGER_CST whose value fits in a
/// uint64_t (Unsigned true) or int64_t (Unsigned false) without overflow.
bool isInt64(tree t, bool Unsigned) {
  if (HOST_BITS_PER_WIDE_INT == 64)
    return host_integerp(t, Unsigned) && !TREE_OVERFLOW (t);
  assert(HOST_BITS_PER_WIDE_INT == 32 &&
         "Only 32- and 64-bit hosts supported!");
  // 32-bit host: must be a non-overflowed integer constant ...
  if (TREE_CODE (t) != INTEGER_CST || TREE_OVERFLOW (t))
    return false;
  // ... whose signedness matches, or whose high word is non-negative so the
  // value is representable either way.
  return (TYPE_UNSIGNED(TREE_TYPE(t)) == Unsigned) ||
         (HOST_WIDE_INT)TREE_INT_CST_HIGH(t) >= 0;
}
/// getInt64 - Extract the value of an INTEGER_CST as a 64-bit integer.
/// Asserts (via isInt64) that the constant actually fits; callers must check
/// isInt64 first if the tree may not qualify.
uint64_t getInt64(tree t, bool Unsigned) {
  assert(isInt64(t, Unsigned) && "invalid constant!");
  return getINTEGER_CSTVal(t);
}
/// getPointerAlignment - Return the known alignment, in bytes, of the object
/// a pointer expression points at.  Never returns 0: unknown alignment is
/// reported as 1 byte.
static unsigned int getPointerAlignment(tree exp) {
  assert(POINTER_TYPE_P (TREE_TYPE (exp)) && "Expected a pointer type!");
  // get_pointer_alignment reports bits; convert to bytes.
  unsigned int AlignBytes = get_pointer_alignment(exp, BIGGEST_ALIGNMENT) / 8;
  if (AlignBytes == 0)
    AlignBytes = 1;
  return AlignBytes;
}
TreeToLLVM *TheTreeToLLVM = 0;
/// getTargetData - Convenience accessor for the target data layout of the
/// global target machine (TheTarget is defined elsewhere in the plugin).
const TargetData &getTargetData() {
  return *TheTarget->getTargetData();
}
/// EmitDebugInfo - Return true if debug info should be emitted for the
/// current function: debug info generation is enabled and the function is
/// not marked as ignored for debugging purposes.
bool TreeToLLVM::EmitDebugInfo() {
  return TheDebugInfo && getFUNCTION_DECL() &&
         !DECL_IGNORED_P(getFUNCTION_DECL());
}
/// TreeToLLVM constructor - Begin converting function 'fndecl'.  Records the
/// decl, zeroes all lazily-created state, seeds the debug-info location from
/// the decl's source location, and installs this object as the single active
/// converter (conversion is not reentrant).
TreeToLLVM::TreeToLLVM(tree fndecl) :
    TD(getTargetData()), Builder(Context, *TheFolder) {
  assert(TheTreeToLLVM == 0 && "Reentering function creation?");
  // FnDecl must be set before EmitDebugInfo(), which inspects it.
  FnDecl = fndecl;
  // Lazily-created objects start out null.
  Fn = 0;
  ReturnBB = 0;
  UnwindBB = 0;
  ReturnOffset = 0;
  AllocaInsertionPoint = 0;
  ExceptionValue = 0;
  ExceptionSelectorValue = 0;
  FuncEHException = 0;
  FuncEHSelector = 0;
  FuncEHGetTypeID = 0;

  if (EmitDebugInfo()) {
    expanded_location Location = expand_location(DECL_SOURCE_LOCATION (fndecl));
    // Fall back to a placeholder when the decl carries no file info.
    const char *File = Location.file ? Location.file : "<unknown file>";
    int Line = Location.file ? Location.line : 0;
    TheDebugInfo->setLocationFile(File);
    TheDebugInfo->setLocationLine(Line);
  }

  TheTreeToLLVM = this;
}
/// TreeToLLVM destructor - Deregister this converter so a new function body
/// may be converted (pairs with the assert in the constructor).
TreeToLLVM::~TreeToLLVM() {
  TheTreeToLLVM = 0;
}
/// getLabelDeclBlock - Return (creating on first use) the LLVM basic block
/// corresponding to a GCC LABEL_DECL.  The block is cached on the decl via
/// SET_DECL_LLVM so every goto/label reference resolves to the same block.
static BasicBlock *getLabelDeclBlock(tree LabelDecl) {
  assert(TREE_CODE(LabelDecl) == LABEL_DECL && "Isn't a label!?");
  if (DECL_LLVM_SET_P(LabelDecl))
    return cast<BasicBlock>(DECL_LLVM(LabelDecl));

  // Name the block after the label when it has one, else "bb".
  tree Ident = DECL_NAME(LabelDecl);
  const char *Name = Ident ? IDENTIFIER_POINTER(Ident) : "bb";
  BasicBlock *BB = BasicBlock::Create(Context, Name);
  SET_DECL_LLVM(LabelDecl, BB);
  return BB;
}
/// llvm_store_scalar_argument - Store an incoming scalar argument value to
/// its stack location.  When RealSize is nonzero only that many bytes of the
/// (integer) argument are live, so the value is truncated/extended to an
/// iN of exactly RealSize bytes and stored through a pointer of that type;
/// otherwise the full value is stored through a pointer to LLVMTy.
static void llvm_store_scalar_argument(Value *Loc, Value *ArgVal,
                                       const llvm::Type *LLVMTy,
                                       unsigned RealSize,
                                       LLVMBuilder &Builder) {
  if (!RealSize) {
    // Simple case: store the whole value as-is.
    Value *Addr = Builder.CreateBitCast(Loc, LLVMTy->getPointerTo());
    Builder.CreateStore(ArgVal, Addr);
    return;
  }

  // Partial store of an integer argument.
  assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
  assert(ArgVal->getType()->isIntegerTy() && "Expected an integer value!");
  const Type *StoreType = IntegerType::get(Context, RealSize * 8);
  Value *Addr = Builder.CreateBitCast(Loc, StoreType->getPointerTo());
  unsigned ValBits = ArgVal->getType()->getPrimitiveSizeInBits();
  unsigned StoreBits = StoreType->getPrimitiveSizeInBits();
  Value *ToStore = (ValBits >= StoreBits)
                       ? Builder.CreateTrunc(ArgVal, StoreType)
                       : Builder.CreateZExt(ArgVal, StoreType);
  Builder.CreateStore(ToStore, Addr);
}
#ifndef LLVM_STORE_SCALAR_ARGUMENT
#define LLVM_STORE_SCALAR_ARGUMENT(LOC,ARG,TYPE,SIZE,BUILDER) \
llvm_store_scalar_argument((LOC),(ARG),(TYPE),(SIZE),(BUILDER))
#endif
#define LLVM_BYVAL_ALIGNMENT_TOO_SMALL(T) \
(LLVM_BYVAL_ALIGNMENT(T) && LLVM_BYVAL_ALIGNMENT(T) < TYPE_ALIGN_UNIT(T))
namespace {
/// FunctionPrologArgumentConversion - ABI client used while emitting the
/// function prologue.  As DefaultABI walks the function's parameter list this
/// object consumes the corresponding LLVM formal arguments (via AI), names
/// them, and stores them to their stack homes.  NameStack/LocStack track the
/// current argument's name and destination address as the walker descends
/// into struct fields.
struct FunctionPrologArgumentConversion : public DefaultABIClient {
  tree FunctionDecl;               // FUNCTION_DECL being converted.
  Function::arg_iterator &AI;      // Next unconsumed LLVM formal argument.
  LLVMBuilder Builder;
  std::vector<Value*> LocStack;    // Stack of destination addresses.
  std::vector<std::string> NameStack; // Stack of argument names (dotted for fields).
  unsigned Offset;                 // Offset of an aggregate-as-scalar result.
  CallingConv::ID &CallingConv;    // Updated by the ABI machinery.
  bool isShadowRet;                // True if arg 0 is a hidden sret pointer.
  FunctionPrologArgumentConversion(tree FnDecl,
                                   Function::arg_iterator &ai,
                                   const LLVMBuilder &B, CallingConv::ID &CC)
    : FunctionDecl(FnDecl), AI(ai), Builder(B), Offset(0), CallingConv(CC),
      isShadowRet(false) {}
  CallingConv::ID& getCallingConv(void) { return CallingConv; }
  // A pad argument exists only to satisfy register assignment; skip it.
  void HandlePad(const llvm::Type *LLVMTy) {
    ++AI;
  }
  bool isShadowReturn() const {
    return isShadowRet;
  }
  void setName(const std::string &Name) {
    NameStack.push_back(Name);
  }
  void setLocation(Value *Loc) {
    LocStack.push_back(Loc);
  }
  // Reset between arguments; exactly one name/location should remain.
  void clear() {
    assert(NameStack.size() == 1 && LocStack.size() == 1 && "Imbalance!");
    NameStack.clear();
    LocStack.clear();
  }
  /// HandleAggregateShadowResult - The function returns an aggregate through
  /// a hidden first parameter.  Bind DECL_RESULT to that pointer, or, when
  /// the tree types disagree (result passed by reference), spill the pointer
  /// to a temporary so DECL_RESULT has an addressable home.
  void HandleAggregateShadowResult(const PointerType *PtrArgTy,
                                   bool RetPtr) {
    assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
           "No explicit return value?");
    AI->setName("agg.result");
    isShadowRet = true;
    tree ResultDecl = DECL_RESULT(FunctionDecl);
    tree RetTy = TREE_TYPE(TREE_TYPE(FunctionDecl));
    if (TREE_CODE(RetTy) == TREE_CODE(TREE_TYPE(ResultDecl))) {
      // Types agree: the incoming pointer is the result decl directly.
      SET_DECL_LLVM(ResultDecl, AI);
      ++AI;
      return;
    }
    assert(TREE_CODE(TREE_TYPE(ResultDecl)) == REFERENCE_TYPE &&
           "Not type match and not passing by reference?");
    // Result is a reference: store the sret pointer into a temporary.
    Value *Tmp = TheTreeToLLVM->CreateTemporary(AI->getType());
    Builder.CreateStore(AI, Tmp);
    SET_DECL_LLVM(ResultDecl, Tmp);
    if (TheDebugInfo && !DECL_IGNORED_P(FunctionDecl)) {
      TheDebugInfo->EmitDeclare(ResultDecl,
                                dwarf::DW_TAG_return_variable,
                                "agg.result", RetTy, Tmp,
                                Builder);
    }
    ++AI;
  }
  /// HandleScalarShadowResult - A scalar returned through a hidden pointer
  /// parameter; bind DECL_RESULT to that pointer.
  void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
    assert(AI != Builder.GetInsertBlock()->getParent()->arg_end() &&
           "No explicit return value?");
    AI->setName("scalar.result");
    isShadowRet = true;
    SET_DECL_LLVM(DECL_RESULT(FunctionDecl), AI);
    ++AI;
  }
  /// HandleScalarArgument - An ordinary scalar argument: coerce the incoming
  /// LLVM value to the expected type if the ABI lowering widened it, then
  /// store it to the current location.
  void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
                            unsigned RealSize = 0) {
    Value *ArgVal = AI;
    if (ArgVal->getType() != LLVMTy) {
      if (ArgVal->getType()->isPointerTy() && LLVMTy->isPointerTy()) {
        // Pointer mismatch (e.g. K&R prototype disagreement): just bitcast.
        ArgVal = Builder.CreateBitCast(ArgVal, LLVMTy);
      } else if (ArgVal->getType()->isDoubleTy()) {
        // Float argument promoted to double by the ABI; narrow it back.
        ArgVal = Builder.CreateFPTrunc(ArgVal, LLVMTy,
                                       NameStack.back().c_str());
      } else {
        // Small integer promoted to i32; truncate back down.
        assert(ArgVal->getType()->isIntegerTy(32) && LLVMTy->isIntegerTy() &&
               "Lowerings don't match?");
        ArgVal = Builder.CreateTrunc(ArgVal, LLVMTy,NameStack.back().c_str());
      }
    }
    assert(!LocStack.empty());
    Value *Loc = LocStack.back();
    LLVM_STORE_SCALAR_ARGUMENT(Loc,ArgVal,LLVMTy,RealSize,Builder);
    AI->setName(NameStack.back());
    ++AI;
  }
  /// HandleByValArgument - An aggregate passed byval.  Normally nothing need
  /// be done, but if the byval alignment is smaller than the type requires we
  /// memcpy it to a properly-aligned temporary (the current location).
  void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
    if (LLVM_BYVAL_ALIGNMENT_TOO_SMALL(type)) {
      assert(!LocStack.empty());
      Value *Loc = LocStack.back();
      const Type *SBP = Type::getInt8PtrTy(Context);
      const Type *IntPtr = getTargetData().getIntPtrType(Context);
      // llvm.memcpy operands: dest, src, size, align, isvolatile.
      Value *Ops[5] = {
        Builder.CreateCast(Instruction::BitCast, Loc, SBP),
        Builder.CreateCast(Instruction::BitCast, AI, SBP),
        ConstantInt::get(IntPtr,
                         TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type))),
        ConstantInt::get(Type::getInt32Ty(Context),
                         LLVM_BYVAL_ALIGNMENT(type)),
        ConstantInt::get(Type::getInt1Ty(Context), false)
      };
      const Type *ArgTypes[3] = {SBP, SBP, IntPtr };
      Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
                                                   Intrinsic::memcpy,
                                                   ArgTypes, 3), Ops, Ops+5);
      AI->setName(NameStack.back());
    }
    ++AI;
  }
  /// HandleFCAArgument - A first-class aggregate passed by value as an LLVM
  /// struct; store it whole to the current location.
  void HandleFCAArgument(const llvm::Type *LLVMTy,
                         tree type ATTRIBUTE_UNUSED) {
    assert(!LocStack.empty());
    Value *Loc = LocStack.back();
    Builder.CreateStore(AI, Loc);
    AI->setName(NameStack.back());
    ++AI;
  }
  /// HandleAggregateResultAsScalar - Record the byte offset at which an
  /// aggregate return value lives inside the scalar the ABI returns it in.
  void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0){
    this->Offset = Offset;
  }
  /// EnterField - The ABI walker is descending into field FieldNo of a
  /// struct-lowered argument: push the field's name and address.
  void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
    NameStack.push_back(NameStack.back()+"."+utostr(FieldNo));
    Value *Loc = LocStack.back();
    Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
    Loc = Builder.CreateStructGEP(Loc, FieldNo);
    LocStack.push_back(Loc);
  }
  void ExitField() {
    NameStack.pop_back();
    LocStack.pop_back();
  }
};
}
/// isPassedByVal - Return true if an aggregate of the given GCC/LLVM type
/// should be passed with the byval attribute: either the target says so
/// directly, or it would be passed in mixed registers but only partially
/// fits given the scalar arguments already assigned.
static bool isPassedByVal(tree type, const Type *Ty,
                          std::vector<const Type*> &ScalarArgs,
                          bool isShadowRet, CallingConv::ID &CC) {
  if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty))
    return true;

  std::vector<const Type*> Elts;
  return LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, CC, Elts) &&
         LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Elts, ScalarArgs, isShadowRet,
                                                 CC);
}
/// LanguageIsC - Return true if the source language is C or Objective-C.
/// The answer is computed once from lang_hooks.name and cached.
static bool LanguageIsC() {
  static unsigned Val = 2;  // 2 = not yet computed; else 0/1.
  if (Val == 2) {
    StringRef LanguageName = lang_hooks.name;
    Val = (LanguageName == "GNU C" || LanguageName == "GNU Objective-C");
  }
  return (bool)Val;
}
/// setLexicalBlockDepths - Walk the GCC lexical BLOCK tree rooted at t,
/// stamping each block's BLOCK_NUMBER with its nesting depth and collecting
/// every variable declared in any block into the set s (so the caller can
/// avoid emitting them twice).  t may be the FUNCTION_DECL itself, in which
/// case the walk starts at its DECL_INITIAL block.
void TreeToLLVM::setLexicalBlockDepths(tree t, treeset &s, unsigned level) {
  tree bstep, step;
  switch (TREE_CODE(t)) {
  default:
    abort();
  case BLOCK:
    for (bstep = t; bstep; bstep = TREE_CHAIN(bstep)) {
      BLOCK_NUMBER(bstep) = level;
      // Collect the vars of each block on the chain.  Previously this read
      // BLOCK_VARS(t), which re-scanned the first block's vars on every
      // iteration and never recorded the vars of sibling blocks.
      for (step = BLOCK_VARS(bstep); step; step = TREE_CHAIN(step))
        s.insert(step);
    }
    // Recurse into nested scopes one level deeper.
    for (bstep = BLOCK_SUBBLOCKS(t); bstep; bstep = TREE_CHAIN(bstep))
      setLexicalBlockDepths(bstep, s, level+1);
    return;
  case FUNCTION_DECL:
    return setLexicalBlockDepths(DECL_INITIAL(t), s, level);
  }
}
/// StartFunctionBody - Create (or reuse) the LLVM Function for FnDecl, set
/// its linkage/attributes/alignment, create the entry block, and emit the
/// prologue: binding each formal parameter either directly to its incoming
/// LLVM argument or to a stack temporary it is stored into.
void TreeToLLVM::StartFunctionBody() {
  const char *Name = "";
  // Get the name of the function.
  if (tree ID = DECL_ASSEMBLER_NAME(FnDecl))
    Name = IDENTIFIER_POINTER(ID);

  // Determine the FunctionType, calling convention and attributes.  K&R-style
  // C functions with no prototype are converted from their actual arguments.
  tree static_chain = cfun->static_chain_decl;
  const FunctionType *FTy;
  CallingConv::ID CallingConv;
  AttrListPtr PAL;
  if (TYPE_ARG_TYPES(TREE_TYPE(FnDecl)) == 0 && LanguageIsC()) {
    FTy = TheTypeConverter->ConvertArgListToFnType(TREE_TYPE(FnDecl),
                                                   DECL_ARGUMENTS(FnDecl),
                                                   static_chain,
                                                   CallingConv, PAL);
  } else {
    FTy = TheTypeConverter->ConvertFunctionType(TREE_TYPE(FnDecl),
                                                FnDecl,
                                                static_chain,
                                                CallingConv, PAL);
  }

  // If a declaration with the right type already exists, reuse it; otherwise
  // create a new Function, splicing out any previous function of the same
  // name (e.g. a prototype lowered with a different type).
  if (DECL_LLVM_SET_P(FnDecl) &&
      cast<PointerType>(DECL_LLVM(FnDecl)->getType())->getElementType() == FTy){
    Fn = cast<Function>(DECL_LLVM(FnDecl));
    assert(Fn->getCallingConv() == CallingConv &&
           "Calling convention disagreement between prototype and impl!");
    handleVisibility(FnDecl, Fn);
  } else {
    Function *FnEntry = TheModule->getFunction(Name);
    if (FnEntry) {
      assert(FnEntry->getName() == Name && "Same entry, different name?");
      assert((FnEntry->isDeclaration() ||
              FnEntry->getLinkage() == Function::AvailableExternallyLinkage) &&
             "Multiple fns with same name and neither are external!");
      FnEntry->setName(""); assert(FnEntry->getCallingConv() == CallingConv &&
             "Calling convention disagreement between prototype and impl!");
    }
    Fn = Function::Create(FTy, Function::ExternalLinkage, Name, TheModule);
    assert(Fn->getName() == Name && "Preexisting fn with the same name!");
    Fn->setCallingConv(CallingConv);
    Fn->setAttributes(PAL);
    // Redirect all users of the old function to the new one, then delete it.
    if (FnEntry) {
      FnEntry->replaceAllUsesWith(
        Builder.getFolder().CreateBitCast(Fn, FnEntry->getType())
      );
      changeLLVMConstant(FnEntry, Fn);
      FnEntry->eraseFromParent();
    }
    SET_DECL_LLVM(FnDecl, Fn);
  }
  assert(Fn->empty() && "Function expanded multiple times!");

  // Compute linkage for non-thunks; thunks are handled later (queued below).
  if (!lang_hooks.function_is_thunk_p (FnDecl)) {
    if (DECL_LLVM_PRIVATE(FnDecl)) {
      Fn->setLinkage(Function::PrivateLinkage);
    } else if (DECL_LLVM_LINKER_PRIVATE(FnDecl)) {
      Fn->setLinkage(Function::LinkerPrivateLinkage);
    } else if (!TREE_PUBLIC(FnDecl) ) {
      Fn->setLinkage(Function::InternalLinkage);
    } else if (DECL_EXTERNAL(FnDecl) &&
               lookup_attribute ("always_inline", DECL_ATTRIBUTES (FnDecl))) {
      Fn->setLinkage(Function::AvailableExternallyLinkage);
    } else if (DECL_COMDAT(FnDecl)) {
      Fn->setLinkage(Function::getLinkOnceLinkage(flag_odr));
    } else if (DECL_WEAK(FnDecl)) {
      Fn->setLinkage(Function::WeakAnyLinkage);
    // NOTE(review): this branch tests function_is_thunk_p inside the
    // !function_is_thunk_p guard, so the thunk half can never fire here.
    } else if (DECL_ONE_ONLY(FnDecl) || lang_hooks.function_is_thunk_p (FnDecl)) {
      Fn->setLinkage(Function::getWeakLinkage(flag_odr));
    } else if (IS_EXTERN_INLINE(FnDecl)) {
      Fn->setLinkage(Function::AvailableExternallyLinkage);
    }
#ifdef TARGET_ADJUST_LLVM_LINKAGE
    TARGET_ADJUST_LLVM_LINKAGE(Fn, FnDecl);
#endif
    handleVisibility(FnDecl, Fn);
  } else {
    // Thunks are emitted later, after their target is known.
    Thunks.push_back(FnDecl);
  }

  // Transfer alignment, section and function attributes from the decl.
  if (DECL_ALIGN (FnDecl) != FUNCTION_BOUNDARY)
    Fn->setAlignment(DECL_ALIGN (FnDecl) / 8);
  if (DECL_SECTION_NAME(FnDecl))
    Fn->setSection(TREE_STRING_POINTER(DECL_SECTION_NAME(FnDecl)));
  if (lookup_attribute ("used", DECL_ATTRIBUTES (FnDecl)))
    AttributeUsedGlobals.insert(Fn);
  if (lookup_attribute ("noinline", DECL_ATTRIBUTES (FnDecl)))
    Fn->addFnAttr(Attribute::NoInline);
  if (lookup_attribute ("always_inline", DECL_ATTRIBUTES (FnDecl)))
    Fn->addFnAttr(Attribute::AlwaysInline);
  if (DECL_IASM_ASM_FUNCTION (FnDecl) == 1)
    Fn->addFnAttr(Attribute::Naked);
  if (DECL_EXPLICIT_INLINE_P (FnDecl))
    Fn->addFnAttr(Attribute::InlineHint);
  if (optimize_size)
    Fn->addFnAttr(Attribute::OptimizeForSize);
  if (flag_stack_protect == 1)
    Fn->addFnAttr(Attribute::StackProtect);
  else if (flag_stack_protect == 2)
    Fn->addFnAttr(Attribute::StackProtectReq);
  if (lookup_attribute ("naked", DECL_ATTRIBUTES (FnDecl)))
    Fn->addFnAttr(Attribute::Naked);
  if (DECL_ATTRIBUTES(FnDecl))
    AddAnnotateAttrsToGlobal(Fn, FnDecl);
  if (!flag_exceptions)
    Fn->setDoesNotThrow();

  // Create the entry block and start inserting into it.
  Builder.SetInsertPoint(BasicBlock::Create(Context, "entry", Fn));

  // Number lexical blocks and collect their vars so they aren't emitted twice.
  treeset block_declared_vars;
  setLexicalBlockDepths(FnDecl, block_declared_vars, 1);
  SeenBlocks.clear();
  if (EmitDebugInfo())
    TheDebugInfo->EmitFunctionStart(FnDecl);

  // Walk the ABI lowering of the return value and each argument, consuming
  // the function's LLVM formal arguments via AI.
  Function::arg_iterator AI = Fn->arg_begin();
  FunctionPrologArgumentConversion Client(FnDecl, AI, Builder, CallingConv);
  DefaultABI ABIConverter(Client);
  ABIConverter.HandleReturnType(TREE_TYPE(TREE_TYPE(FnDecl)), FnDecl,
                                DECL_BUILT_IN(FnDecl));
  ReturnOffset = Client.Offset;

  // The static chain (for nested functions) is processed first when present.
  tree Args = static_chain ? static_chain : DECL_ARGUMENTS(FnDecl);
  std::vector<const Type*> ScalarArgs;
  while (Args) {
    const char *Name = "unnamed_arg";
    if (DECL_NAME(Args)) Name = IDENTIFIER_POINTER(DECL_NAME(Args));

    const Type *ArgTy = ConvertType(TREE_TYPE(Args));
    bool isInvRef = isPassedByInvisibleReference(TREE_TYPE(Args));
    // Arguments passed in-place (invisible reference, byval vectors or
    // byval aggregates with sufficient alignment) bind directly to the
    // incoming LLVM argument; everything else gets a stack temporary.
    if (isInvRef ||
        (ArgTy->isVectorTy() &&
         LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(TREE_TYPE(Args)) &&
         !LLVM_BYVAL_ALIGNMENT_TOO_SMALL(TREE_TYPE(Args))) ||
        (!ArgTy->isSingleValueType() &&
         isPassedByVal(TREE_TYPE(Args), ArgTy, ScalarArgs,
                       Client.isShadowReturn(), CallingConv) &&
         !LLVM_BYVAL_ALIGNMENT_TOO_SMALL(TREE_TYPE(Args)))) {
      AI->setName(Name);
      SET_DECL_LLVM(Args, AI);
      if (!isInvRef && EmitDebugInfo())
        TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable,
                                  Name, TREE_TYPE(Args),
                                  AI, Builder);
      ABIConverter.HandleArgument(TREE_TYPE(Args), ScalarArgs);
    } else {
      // Allocate a stack home and let the ABI client store the incoming
      // value(s) into it.
      Value *Tmp = CreateTemporary(ArgTy, TYPE_ALIGN_UNIT(TREE_TYPE(Args)));
      Tmp->setName(std::string(Name)+"_addr");
      SET_DECL_LLVM(Args, Tmp);
      if (EmitDebugInfo())
        TheDebugInfo->EmitDeclare(Args, dwarf::DW_TAG_arg_variable,
                                  Name, TREE_TYPE(Args), Tmp,
                                  Builder);
      if (DECL_ATTRIBUTES(Args))
        EmitAnnotateIntrinsic(Tmp, Args);
      if (POINTER_TYPE_P(TREE_TYPE(Args))
          && lookup_attribute ("gcroot", TYPE_ATTRIBUTES(TREE_TYPE(Args))))
        EmitTypeGcroot(Tmp, Args);
      Client.setName(Name);
      Client.setLocation(Tmp);
      ABIConverter.HandleArgument(TREE_TYPE(Args), ScalarArgs);
      Client.clear();
    }

    // After the static chain, continue with the real argument list.
    Args = Args == static_chain ? DECL_ARGUMENTS(FnDecl) : TREE_CHAIN(Args);
  }

  // Give the return value a stack home if it doesn't already have one.
  if (DECL_RESULT(FnDecl) && !VOID_TYPE_P(TREE_TYPE(DECL_RESULT(FnDecl))) &&
      !DECL_LLVM_SET_P(DECL_RESULT(FnDecl)))
    EmitAutomaticVariableDecl(DECL_RESULT(FnDecl));

  // NOTE(review): nonlocal goto save areas are recognized but not handled
  // here — the original leaves this block intentionally empty.
  if (cfun->nonlocal_goto_save_area) {
  }

  if (EmitDebugInfo())
    TheDebugInfo->EmitStopPoint(Fn, Builder.GetInsertBlock(), Builder);

  // Emit variables not owned by any lexical block (those are emitted when
  // their block is entered by switchLexicalBlock).
  for (tree t = cfun->unexpanded_var_list; t; t = TREE_CHAIN(t)) {
    if (!DECL_LLVM_SET_P(TREE_VALUE(t)) &&
        block_declared_vars.count(TREE_VALUE(t)) == 0)
      EmitAutomaticVariableDecl(TREE_VALUE(t));
  }
  switchLexicalBlock(DECL_INITIAL(FnDecl));

  // Block for the unified return; added to the function by FinishFunctionBody.
  ReturnBB = BasicBlock::Create(Context, "return");
}
/// FinishFunctionBody - Emit the unified return block (loading and coercing
/// DECL_RESULT into the function's LLVM return type, including multi-value
/// aggregate returns), emit EH landing/post pads and the unwind block, and
/// clean up temporary no-op bitcasts.  Returns the finished Function.
Function *TreeToLLVM::FinishFunctionBody() {
  // Insert the return block at the end of the function.
  EmitBlock(ReturnBB);

  SmallVector <Value *, 4> RetVals;
  if (Fn->getReturnType() != Type::getVoidTy(Context)) {
    if (!isAggregateTreeType(TREE_TYPE(DECL_RESULT(FnDecl)))) {
      // Scalar result: load it and cast to the declared LLVM return type.
      tree TreeRetVal = DECL_RESULT(FnDecl);
      Value *RetVal = Builder.CreateLoad(DECL_LLVM(TreeRetVal), "retval");
      bool RetValSigned = !TYPE_UNSIGNED(TREE_TYPE(TreeRetVal));
      Instruction::CastOps opcode = CastInst::getCastOpcode(
          RetVal, RetValSigned, Fn->getReturnType(), RetValSigned);
      RetVal = CastToType(opcode, RetVal, Fn->getReturnType());
      RetVals.push_back(RetVal);
    } else {
      // Aggregate result returned in registers.
      Value *RetVal = DECL_LLVM(DECL_RESULT(FnDecl));
      if (const StructType *STy = dyn_cast<StructType>(Fn->getReturnType())) {
        // Struct return: load each element separately (multiple return vals).
        Value *R1 = BitCastToType(RetVal, STy->getPointerTo());
        llvm::Value *Idxs[2];
        Idxs[0] = ConstantInt::get(llvm::Type::getInt32Ty(Context), 0);
        for (unsigned ri = 0; ri < STy->getNumElements(); ++ri) {
          Idxs[1] = ConstantInt::get(llvm::Type::getInt32Ty(Context), ri);
          Value *GEP = Builder.CreateGEP(R1, Idxs, Idxs+2, "mrv_gep");
          Value *E = Builder.CreateLoad(GEP, "mrv");
          RetVals.push_back(E);
        }
      } else {
        // Aggregate returned as a single scalar, possibly at a byte offset
        // recorded by the ABI client during the prologue.
        if (ReturnOffset) {
          RetVal = BitCastToType(RetVal,
                                 Type::getInt8PtrTy(Context));
          RetVal = Builder.CreateGEP(RetVal,
                     ConstantInt::get(TD.getIntPtrType(Context), ReturnOffset));
        }
        RetVal = BitCastToType(RetVal, Fn->getReturnType()->getPointerTo());
        RetVal = Builder.CreateLoad(RetVal, "retval");
        RetVals.push_back(RetVal);
      }
    }
  }
  if (EmitDebugInfo()) {
    TheDebugInfo->EmitStopPoint(Fn, Builder.GetInsertBlock(), Builder);
    TheDebugInfo->EmitFunctionEnd(Builder.GetInsertBlock(), true);
  }
  // Emit the ret instruction itself.
  if (RetVals.empty())
    Builder.CreateRetVoid();
  else if (!Fn->getReturnType()->isAggregateType()) {
    assert(RetVals.size() == 1 && "Non-aggregate return has multiple values!");
    Builder.CreateRet(RetVals[0]);
  } else
    Builder.CreateAggregateRet(RetVals.data(), RetVals.size());

  // Emit pending exception handling code and scrap per-function state.
  EmitLandingPads();
  EmitPostPads();
  EmitUnwindBlock();
  eraseLocalLLVMValues();

  // Remove the placeholder self-bitcasts created for uniquing and forward
  // their users to the underlying value.
  for (std::vector<BitCastInst *>::iterator I = UniquedValues.begin(),
       E = UniquedValues.end(); I != E; ++I) {
    BitCastInst *BI = *I;
    assert(BI->getSrcTy() == BI->getDestTy() && "Not a no-op bitcast!");
    BI->replaceAllUsesWith(BI->getOperand(0));
    BI->eraseFromParent();
  }
  UniquedValues.clear();
  return Fn;
}
/// EmitFunction - Convert the whole current GCC function: emit the prologue,
/// walk every basic block and statement of the GIMPLE CFG emitting IR, wire
/// up fallthrough edges, and emit the epilogue.  Returns the LLVM Function.
Function *TreeToLLVM::EmitFunction() {
  // Set up parameters and prepare for return, for the function.
  StartFunctionBody();

  basic_block bb;
  edge e;
  edge_iterator ei;
  tree stmt = NULL_TREE;
  FOR_EACH_BB (bb) {
    for (block_stmt_iterator bsi = bsi_start (bb); !bsi_end_p (bsi);
         bsi_next (&bsi)) {
      MemRef DestLoc;
      stmt = bsi_stmt (bsi);

      // Aggregate-valued statements (other than assignments, which manage
      // their own destination) need a temporary to hold the result.
      if (isAggregateTreeType(TREE_TYPE(stmt)) &&
          TREE_CODE(stmt)!= MODIFY_EXPR && TREE_CODE(stmt)!=INIT_EXPR)
        DestLoc = CreateTempLoc(ConvertType(TREE_TYPE(stmt)));

      Emit(stmt, DestLoc.Ptr ? &DestLoc : NULL);
    }

    // NOTE(review): stmt is declared outside the FOR_EACH_BB loop and never
    // reset per block, so this only fires before any statement at all has
    // been emitted — presumably intentional for the entry, but confirm.
    if (!stmt && EmitDebugInfo()) {
      assert(EDGE_COUNT(bb->succs) == 1 && "empty basic block with multiple successors?") ;
      e = EDGE_I(bb->succs, 0);
      source_locus locus = e->goto_locus;
      if (locus) {
        TheDebugInfo->setLocationFile(LOCATION_FILE(*locus));
        TheDebugInfo->setLocationLine(LOCATION_LINE(*locus));
      }
      TheDebugInfo->EmitStopPoint(Fn, Builder.GetInsertBlock(), Builder);
    }

    // If this block falls through to a block other than the next one in
    // layout order, emit an explicit branch (LLVM has no implicit fallthru).
    FOR_EACH_EDGE (e, ei, bb->succs)
      if (e->flags & EDGE_FALLTHRU)
        break;
    if (e && e->dest != bb->next_bb) {
      Builder.CreateBr(getLabelDeclBlock(tree_block_label (e->dest)));
      EmitBlock(BasicBlock::Create(Context, ""));
    }
  }

  // Wrap things up.
  return FinishFunctionBody();
}
/// switchLexicalBlock - Make the lexical BLOCK containing exp the current
/// one for debug-info purposes, emitting region-change events and any
/// variables declared in newly-entered blocks.  Walks up via
/// BLOCK_SUPERCONTEXT so outer blocks are entered before inner ones.
void TreeToLLVM::switchLexicalBlock(tree exp) {
  // A null or FUNCTION_DECL "block" means function (outermost) scope.
  if (exp == NULL_TREE || TREE_CODE(exp) == FUNCTION_DECL) {
    if (EmitDebugInfo())
      TheDebugInfo->setCurrentLexicalBlock(exp);
    return;
  }
  // Only expressions and BLOCK nodes carry block information.
  if (!EXPR_P(exp) && (TREE_CODE(exp) != BLOCK))
    return;
  tree new_block = EXPR_P(exp) ? TREE_BLOCK(exp) : exp;
  if (!new_block)
    return;
  // On first visit, recursively enter all enclosing blocks first.
  bool previously_visited = !SeenBlocks.insert(new_block);
  if (!previously_visited)
    switchLexicalBlock(BLOCK_SUPERCONTEXT(new_block));
  if (EmitDebugInfo()) {
    tree current_block = TheDebugInfo->getCurrentLexicalBlock();
    if (new_block && current_block && new_block != current_block) {
      // Climb both blocks toward the root (using BLOCK_NUMBER depths set by
      // setLexicalBlockDepths) until they meet at a common ancestor, then
      // tell the debug-info machinery which regions were exited/entered.
      tree new_climber = new_block, current_climber = current_block;
      unsigned new_climber_depth, current_climber_depth;
      while (new_climber != current_climber) {
        current_climber_depth = DECL_P(current_climber) ? 0 : BLOCK_NUMBER(current_climber);
        new_climber_depth = DECL_P(new_climber) ? 0 : BLOCK_NUMBER(new_climber);
        if (new_climber_depth <= current_climber_depth)
          current_climber = BLOCK_SUPERCONTEXT(current_climber);
        if (new_climber_depth >= current_climber_depth)
          new_climber = BLOCK_SUPERCONTEXT(new_climber);
      }
      assert(new_climber == current_climber && "missed common TREE_BLOCK parent");
      TheDebugInfo->change_regions(new_block, new_climber);
    }
  }
  // Variables are only emitted the first time their block is entered.
  if (previously_visited)
    return;
  tree step;
  for (step = BLOCK_VARS(new_block); step; step = TREE_CHAIN(step))
    switch (TREE_CODE_CLASS(TREE_CODE(step))) {
    default:
      assert(0 && "non-var, non-type node hanging from a GCC BLOCK?");
      break;
    case tcc_type:
      break;
    case tcc_declaration:
      if (!DECL_LLVM_SET_P(step))
        EmitAutomaticVariableDecl(step);
      break;
    }
  // A var-less block may still have subblocks with vars; descend into them.
  if (!BLOCK_VARS(new_block) && BLOCK_SUBBLOCKS(new_block))
    switchLexicalBlock(BLOCK_SUBBLOCKS(new_block));
}
/// Emit - Convert the specified GCC tree expression to LLVM IR.  For scalar
/// expressions DestLoc is null and the scalar Value is returned; for
/// aggregate expressions DestLoc names the memory the result is stored into
/// and null is returned (MODIFY/INIT_EXPR may take either form).
Value *TreeToLLVM::Emit(tree exp, const MemRef *DestLoc) {
  assert((isAggregateTreeType(TREE_TYPE(exp)) == (DestLoc != 0) ||
          TREE_CODE(exp) == MODIFY_EXPR || TREE_CODE(exp) == INIT_EXPR) &&
         "Didn't pass DestLoc to an aggregate expr, or passed it to scalar!");

  Value *Result = 0;

  // Keep the debug location in sync with the expression being emitted.
  bool emitdebuginfo = EmitDebugInfo();
  if (emitdebuginfo && EXPR_HAS_LOCATION(exp)) {
    TheDebugInfo->setLocationFile(EXPR_FILENAME(exp));
    TheDebugInfo->setLocationLine(EXPR_LINENO(exp));
  }
  // Enter the lexical block this expression belongs to, if it changed.
  TreeToLLVM::switchLexicalBlock(exp);

  if (emitdebuginfo)
    TheDebugInfo->EmitStopPoint(Fn, Builder.GetInsertBlock(), Builder);

  // Dispatch on the tree code to the specific emitter.
  switch (TREE_CODE(exp)) {
  default:
    errs() << "Unhandled expression!\n"
           << "TREE_CODE: " << TREE_CODE(exp) << "\n";
    debug_tree(exp);
    abort();

  // Control flow.
  case LABEL_EXPR:     Result = EmitLABEL_EXPR(exp); break;
  case GOTO_EXPR:      Result = EmitGOTO_EXPR(exp); break;
  case RETURN_EXPR:    Result = EmitRETURN_EXPR(exp, DestLoc); break;
  case COND_EXPR:      Result = EmitCOND_EXPR(exp); break;
  case SWITCH_EXPR:    Result = EmitSWITCH_EXPR(exp); break;

  // Exception handling.
  case EXC_PTR_EXPR:   Result = EmitEXC_PTR_EXPR(exp); break;
  case FILTER_EXPR:    Result = EmitFILTER_EXPR(exp); break;
  case RESX_EXPR:      Result = EmitRESX_EXPR(exp); break;

  // References (lvalues loaded as rvalues).
  case VAR_DECL:
  case PARM_DECL:
  case RESULT_DECL:
  case INDIRECT_REF:
  case ARRAY_REF:
  case ARRAY_RANGE_REF:
  case COMPONENT_REF:
  case BIT_FIELD_REF:
  case STRING_CST:
  case REALPART_EXPR:
  case IMAGPART_EXPR:
    Result = EmitLoadOfLValue(exp, DestLoc);
    break;
  case OBJ_TYPE_REF:   Result = EmitOBJ_TYPE_REF(exp); break;
  case ADDR_EXPR:      Result = EmitADDR_EXPR(exp); break;
  case CALL_EXPR:      Result = EmitCALL_EXPR(exp, DestLoc); break;
  case INIT_EXPR:
  case MODIFY_EXPR:    Result = EmitMODIFY_EXPR(exp, DestLoc); break;
  case ASM_EXPR:       Result = EmitASM_EXPR(exp); break;
  case NON_LVALUE_EXPR: Result = Emit(TREE_OPERAND(exp, 0), DestLoc); break;

  // Unary operators and conversions.
  case NOP_EXPR:       Result = EmitNOP_EXPR(exp, DestLoc); break;
  case FIX_TRUNC_EXPR:
  case FLOAT_EXPR:
  case CONVERT_EXPR:   Result = EmitCONVERT_EXPR(exp, DestLoc); break;
  case VIEW_CONVERT_EXPR: Result = EmitVIEW_CONVERT_EXPR(exp, DestLoc); break;
  case NEGATE_EXPR:    Result = EmitNEGATE_EXPR(exp, DestLoc); break;
  case CONJ_EXPR:      Result = EmitCONJ_EXPR(exp, DestLoc); break;
  case ABS_EXPR:       Result = EmitABS_EXPR(exp); break;
  case BIT_NOT_EXPR:   Result = EmitBIT_NOT_EXPR(exp); break;
  case TRUTH_NOT_EXPR: Result = EmitTRUTH_NOT_EXPR(exp); break;

  // Comparisons: EmitCompare picks the unsigned/signed/FP predicate.
  case LT_EXPR:
    Result = EmitCompare(exp, ICmpInst::ICMP_ULT, ICmpInst::ICMP_SLT,
                         FCmpInst::FCMP_OLT);
    break;
  case LE_EXPR:
    Result = EmitCompare(exp, ICmpInst::ICMP_ULE, ICmpInst::ICMP_SLE,
                         FCmpInst::FCMP_OLE);
    break;
  case GT_EXPR:
    Result = EmitCompare(exp, ICmpInst::ICMP_UGT, ICmpInst::ICMP_SGT,
                         FCmpInst::FCMP_OGT);
    break;
  case GE_EXPR:
    Result = EmitCompare(exp, ICmpInst::ICMP_UGE, ICmpInst::ICMP_SGE,
                         FCmpInst::FCMP_OGE);
    break;
  case EQ_EXPR:
    Result = EmitCompare(exp, ICmpInst::ICMP_EQ, ICmpInst::ICMP_EQ,
                         FCmpInst::FCMP_OEQ);
    break;
  case NE_EXPR:
    Result = EmitCompare(exp, ICmpInst::ICMP_NE, ICmpInst::ICMP_NE,
                         FCmpInst::FCMP_UNE);
    break;
  case UNORDERED_EXPR:
    Result = EmitCompare(exp, 0, 0, FCmpInst::FCMP_UNO);
    break;
  case ORDERED_EXPR:
    Result = EmitCompare(exp, 0, 0, FCmpInst::FCMP_ORD);
    break;
  case UNLT_EXPR: Result = EmitCompare(exp, 0, 0, FCmpInst::FCMP_ULT); break;
  case UNLE_EXPR: Result = EmitCompare(exp, 0, 0, FCmpInst::FCMP_ULE); break;
  case UNGT_EXPR: Result = EmitCompare(exp, 0, 0, FCmpInst::FCMP_UGT); break;
  case UNGE_EXPR: Result = EmitCompare(exp, 0, 0, FCmpInst::FCMP_UGE); break;
  case UNEQ_EXPR: Result = EmitCompare(exp, 0, 0, FCmpInst::FCMP_UEQ); break;
  case LTGT_EXPR: Result = EmitCompare(exp, 0, 0, FCmpInst::FCMP_ONE); break;

  // Binary arithmetic: integer and FP use distinct LLVM opcodes.
  case PLUS_EXPR:
    Result = EmitBinOp(exp, DestLoc,
                       FLOAT_TYPE_P(TREE_TYPE(exp)) ?
                       Instruction::FAdd :
                       Instruction::Add);
    break;
  case MINUS_EXPR:
    Result = EmitBinOp(exp, DestLoc,
                       FLOAT_TYPE_P(TREE_TYPE(exp)) ?
                       Instruction::FSub :
                       Instruction::Sub);
    break;
  case MULT_EXPR:
    Result = EmitBinOp(exp, DestLoc,
                       FLOAT_TYPE_P(TREE_TYPE(exp)) ?
                       Instruction::FMul :
                       Instruction::Mul);
    break;
  case EXACT_DIV_EXPR: Result = EmitEXACT_DIV_EXPR(exp, DestLoc); break;
  case TRUNC_DIV_EXPR:
    if (TYPE_UNSIGNED(TREE_TYPE(exp)))
      Result = EmitBinOp(exp, DestLoc, Instruction::UDiv);
    else
      Result = EmitBinOp(exp, DestLoc, Instruction::SDiv);
    break;
  case RDIV_EXPR: Result = EmitBinOp(exp, DestLoc, Instruction::FDiv); break;
  case CEIL_DIV_EXPR: Result = EmitCEIL_DIV_EXPR(exp); break;
  case FLOOR_DIV_EXPR: Result = EmitFLOOR_DIV_EXPR(exp); break;
  case ROUND_DIV_EXPR: Result = EmitROUND_DIV_EXPR(exp); break;
  case TRUNC_MOD_EXPR:
    if (TYPE_UNSIGNED(TREE_TYPE(exp)))
      Result = EmitBinOp(exp, DestLoc, Instruction::URem);
    else
      Result = EmitBinOp(exp, DestLoc, Instruction::SRem);
    break;
  case FLOOR_MOD_EXPR: Result = EmitFLOOR_MOD_EXPR(exp, DestLoc); break;

  // Bitwise and logical operators.
  case BIT_AND_EXPR: Result = EmitBinOp(exp, DestLoc, Instruction::And);break;
  case BIT_IOR_EXPR: Result = EmitBinOp(exp, DestLoc, Instruction::Or );break;
  case BIT_XOR_EXPR: Result = EmitBinOp(exp, DestLoc, Instruction::Xor);break;
  case TRUTH_AND_EXPR: Result = EmitTruthOp(exp, Instruction::And); break;
  case TRUTH_OR_EXPR:  Result = EmitTruthOp(exp, Instruction::Or); break;
  case TRUTH_XOR_EXPR: Result = EmitTruthOp(exp, Instruction::Xor); break;

  // Shifts and rotates.
  case RSHIFT_EXPR:
    Result = EmitShiftOp(exp,DestLoc,
       TYPE_UNSIGNED(TREE_TYPE(exp)) ? Instruction::LShr : Instruction::AShr);
    break;
  case LSHIFT_EXPR:  Result = EmitShiftOp(exp,DestLoc,Instruction::Shl);break;
  case RROTATE_EXPR:
    Result = EmitRotateOp(exp, Instruction::LShr, Instruction::Shl);
    break;
  case LROTATE_EXPR:
    Result = EmitRotateOp(exp, Instruction::Shl, Instruction::LShr);
    break;

  // Min/max.
  case MIN_EXPR:
    Result = EmitMinMaxExpr(exp, ICmpInst::ICMP_ULE, ICmpInst::ICMP_SLE,
                            FCmpInst::FCMP_OLE);
    break;
  case MAX_EXPR:
    Result = EmitMinMaxExpr(exp, ICmpInst::ICMP_UGE, ICmpInst::ICMP_SGE,
                            FCmpInst::FCMP_OGE);
    break;

  // Constants and constructors.
  case CONSTRUCTOR:  Result = EmitCONSTRUCTOR(exp, DestLoc); break;
  case COMPLEX_CST:  EmitCOMPLEX_CST (exp, DestLoc); break;
  case COMPLEX_EXPR: EmitCOMPLEX_EXPR(exp, DestLoc); break;
  case INTEGER_CST:
    Result = TreeConstantToLLVM::ConvertINTEGER_CST(exp);
    break;
  case REAL_CST:
    Result = TreeConstantToLLVM::ConvertREAL_CST(exp);
    break;
  case VECTOR_CST:
    Result = TreeConstantToLLVM::ConvertVECTOR_CST(exp);
    break;
  }

  // Restore the location in case a nested Emit moved it.
  if (EmitDebugInfo() && EXPR_HAS_LOCATION(exp)) {
    TheDebugInfo->setLocationFile(EXPR_FILENAME(exp));
    TheDebugInfo->setLocationLine(EXPR_LINENO(exp));
  }

  assert(((DestLoc && Result == 0) || DestLoc == 0) &&
         "Expected a scalar or aggregate but got the wrong thing!");
  assert((Result == 0 || VOID_TYPE_P(TREE_TYPE(exp)) ||
          ConvertType(TREE_TYPE(exp))->isVectorTy() ||
          TREE_CODE(exp) == MODIFY_EXPR ||
          Result->getType() == ConvertType(TREE_TYPE(exp))) &&
         "Value has wrong type!");
  return Result;
}
/// get_constant_alignment - Return the alignment, in bits, to use for a
/// constant expression, starting from its type's alignment and letting the
/// target's CONSTANT_ALIGNMENT macro (if defined) raise it.
static unsigned int
get_constant_alignment (tree exp)
{
  unsigned int align = TYPE_ALIGN (TREE_TYPE (exp));
#ifdef CONSTANT_ALIGNMENT
  align = CONSTANT_ALIGNMENT (exp, align);
#endif
  return align;
}
/// EmitLV - Convert the specified GCC tree expression to an LValue: an
/// address (plus alignment, and bitfield info where applicable) that the
/// value can be loaded from or stored to.  Dispatches on the tree code.
LValue TreeToLLVM::EmitLV(tree exp) {
  LValue LV;

  switch (TREE_CODE(exp)) {
  default:
    errs() << "Unhandled lvalue expression!\n";
    debug_tree(exp);
    abort();

  // Declarations share a common path.
  case PARM_DECL:
  case VAR_DECL:
  case FUNCTION_DECL:
  case CONST_DECL:
  case RESULT_DECL:
    LV = EmitLV_DECL(exp);
    break;
  case ARRAY_RANGE_REF:
  case ARRAY_REF:
    LV = EmitLV_ARRAY_REF(exp);
    break;
  case COMPONENT_REF:
    LV = EmitLV_COMPONENT_REF(exp);
    break;
  case BIT_FIELD_REF:
    LV = EmitLV_BIT_FIELD_REF(exp);
    break;
  // Real/imaginary part of a complex value: second argument selects which.
  case REALPART_EXPR:
    LV = EmitLV_XXXXPART_EXPR(exp, 0);
    break;
  case IMAGPART_EXPR:
    LV = EmitLV_XXXXPART_EXPR(exp, 1);
    break;
  case LABEL_DECL: {
    LV = LValue(EmitLV_LABEL_DECL(exp), 1);
    break;
  }
  // Constants materialized in memory; alignment comes from the target.
  case COMPLEX_CST: {
    Value *Ptr = TreeConstantToLLVM::EmitLV_COMPLEX_CST(exp);
    LV = LValue(Ptr, get_constant_alignment(exp) / 8);
    break;
  }
  case STRING_CST: {
    Value *Ptr = TreeConstantToLLVM::EmitLV_STRING_CST(exp);
    LV = LValue(Ptr, get_constant_alignment(exp) / 8);
    break;
  }
  case VIEW_CONVERT_EXPR:
    LV = EmitLV_VIEW_CONVERT_EXPR(exp);
    break;
  // Exception handling values.
  case EXC_PTR_EXPR:
    LV = EmitLV_EXC_PTR_EXPR(exp);
    break;
  case FILTER_EXPR:
    LV = EmitLV_FILTER_EXPR(exp);
    break;
  case WITH_SIZE_EXPR:
    LV = EmitLV_WITH_SIZE_EXPR(exp);
    break;
  case INDIRECT_REF:
    LV = EmitLV_INDIRECT_REF(exp);
    break;
  }

  // Sanity-check: the address must have the pointer-to-converted-type type
  // (bitfield lvalues are exempt).
  assert((LV.isBitfield() || VOID_TYPE_P(TREE_TYPE(exp)) ||
          LV.Ptr->getType() == ConvertType(TREE_TYPE(exp))->getPointerTo()) &&
         "LValue has wrong type!");

  return LV;
}
/// TODO - Report an unimplemented tree node, dump it when available, and
/// abort.  Used as a placeholder for conversion paths not yet written.
void TreeToLLVM::TODO(tree exp) {
  errs() << "Unhandled tree node\n";
  if (exp)
    debug_tree(exp);
  abort();
}
/// CastToType - Cast V to Ty with the given LLVM cast opcode, preserving
/// the value's name.  A zext whose source is already i1 being cast back to
/// i1 is folded away to the original operand.
Value *TreeToLLVM::CastToType(unsigned opcode, Value *V, const Type* Ty) {
  // Handle 'trunc (zext i1 X to T2) to i1' as X, since this occurs all over
  // the place.
  ZExtInst *ZI = dyn_cast<ZExtInst>(V);
  if (ZI && Ty->isIntegerTy(1) &&
      ZI->getOperand(0)->getType()->isIntegerTy(1))
    return ZI->getOperand(0);

  return Builder.CreateCast(Instruction::CastOps(opcode), V, Ty,
                            V->getName().data());
}
/// CastToAnyType - Cast V to Ty using whatever cast LLVM deems appropriate
/// for the source/destination types and signedness.  No-op when the types
/// already match.
Value *TreeToLLVM::CastToAnyType(Value *V, bool VisSigned,
                                 const Type* Ty, bool TyIsSigned) {
  if (V->getType() == Ty)
    return V;
  return CastToType(CastInst::getCastOpcode(V, VisSigned, Ty, TyIsSigned),
                    V, Ty);
}
/// CastToUIntType - Cast an integer value to another integer type, treating
/// the value as unsigned: truncate when narrowing, zero-extend when
/// widening.  No-op when the types already match.
Value *TreeToLLVM::CastToUIntType(Value *V, const Type* Ty) {
  if (V->getType() == Ty)
    return V;

  unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
  unsigned DstBits = Ty->getPrimitiveSizeInBits();
  assert(SrcBits != DstBits && "Types are different but have same #bits?");

  if (SrcBits > DstBits)
    return CastToType(Instruction::Trunc, V, Ty);
  return CastToType(Instruction::ZExt, V, Ty);
}
/// CastToSIntType - Cast an integer value to another integer type, treating
/// the value as signed: truncate when narrowing, sign-extend when widening.
/// No-op when the types already match.
Value *TreeToLLVM::CastToSIntType(Value *V, const Type* Ty) {
  if (V->getType() == Ty)
    return V;

  unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
  unsigned DstBits = Ty->getPrimitiveSizeInBits();
  assert(SrcBits != DstBits && "Types are different but have same #bits?");

  if (SrcBits > DstBits)
    return CastToType(Instruction::Trunc, V, Ty);
  return CastToType(Instruction::SExt, V, Ty);
}
/// CastToFPType - Cast a floating-point value to another floating-point
/// type: fptrunc when narrowing, fpext when widening.  Same-width types are
/// returned unchanged.
Value *TreeToLLVM::CastToFPType(Value *V, const Type* Ty) {
  unsigned SrcBits = V->getType()->getPrimitiveSizeInBits();
  unsigned DstBits = Ty->getPrimitiveSizeInBits();
  if (SrcBits == DstBits)
    return V;

  if (SrcBits > DstBits)
    return CastToType(Instruction::FPTrunc, V, Ty);
  return CastToType(Instruction::FPExt, V, Ty);
}
/// BitCastToType - Reinterpret V as type Ty without changing any bits.
Value *TreeToLLVM::BitCastToType(Value *V, const Type *Ty) {
  return CastToType(Instruction::BitCast, V, Ty);
}
/// CreateTemporary - Create a new alloca instruction of the specified type,
/// inserting it into the entry block and returning it.  The resulting
/// instruction's type is a pointer to the specified type.  If 'align' is
/// non-zero it is used as the alloca's alignment.
AllocaInst *TreeToLLVM::CreateTemporary(const Type *Ty, unsigned align) {
  if (AllocaInsertionPoint == 0) {
    // Create a dummy no-op instruction ("bitcast of i32 0 to i32") at the top
    // of the entry block; all temporaries are inserted before it so they stay
    // grouped in the entry block in creation order.
    AllocaInsertionPoint = CastInst::Create(Instruction::BitCast,
      Constant::getNullValue(Type::getInt32Ty(Context)),
      Type::getInt32Ty(Context), "alloca point");
    // Insert it as the first instruction in the entry block.
    Fn->begin()->getInstList().insert(Fn->begin()->begin(),
                                      AllocaInsertionPoint);
  }
  if (align)
    return new AllocaInst(Ty, 0, align, "memtmp", AllocaInsertionPoint);
  else
    return new AllocaInst(Ty, 0, "memtmp", AllocaInsertionPoint);
}
/// CreateTempLoc - Create a non-volatile memory location of type Ty, backed
/// by an entry-block alloca, with at least the preferred alignment for Ty.
MemRef TreeToLLVM::CreateTempLoc(const Type *Ty) {
  AllocaInst *Alloca = CreateTemporary(Ty);
  // CreateTemporary was called without an explicit alignment; fill in the
  // target's preferred alignment for this type.
  if (Alloca->getAlignment() == 0)
    Alloca->setAlignment(TD.getPrefTypeAlignment(Ty));
  return MemRef(Alloca, Alloca->getAlignment(), /*Volatile=*/false);
}
/// EmitBlock - Append BB to the current function and make it the insertion
/// point.  If the current block has no terminator, fall through into BB; a
/// current block that is unnamed AND empty is simply deleted instead, since
/// nothing can reference it.
void TreeToLLVM::EmitBlock(BasicBlock *BB) {
  BasicBlock *CurBB = Builder.GetInsertBlock();
  if (CurBB->getTerminator() == 0) {
    // Unnamed + empty means the block is an unreachable placeholder; erase it
    // rather than emitting a branch from it.
    if (CurBB->getName().empty() && CurBB->begin() == CurBB->end())
      CurBB->eraseFromParent();
    else
      Builder.CreateBr(BB);
  }
  Fn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB); }
/// CopyAggregate - Recursively copy an aggregate from SrcLoc to DestLoc using
/// element-wise loads and stores, preserving each side's volatility and
/// tracking the provable alignment of every piece.  When gccType is non-null
/// it is consulted to skip struct elements that are only padding; recursive
/// calls pass 0 since the padding info applies only at the top level.
static void CopyAggregate(MemRef DestLoc, MemRef SrcLoc,
                          LLVMBuilder &Builder, tree gccType){
  assert(DestLoc.Ptr->getType() == SrcLoc.Ptr->getType() &&
         "Cannot copy between two pointers of different type!");
  const Type *ElTy =
    cast<PointerType>(DestLoc.Ptr->getType())->getElementType();

  // The copy can only assume the weaker of the two alignments.
  unsigned Alignment = std::min(DestLoc.getAlignment(), SrcLoc.getAlignment());

  if (ElTy->isSingleValueType()) {
    // Scalar leaf: plain load/store pair.
    LoadInst *V = Builder.CreateLoad(SrcLoc.Ptr, SrcLoc.Volatile);
    StoreInst *S = Builder.CreateStore(V, DestLoc.Ptr, DestLoc.Volatile);
    V->setAlignment(Alignment);
    S->setAlignment(Alignment);
  } else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
    const StructLayout *SL = getTargetData().getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Skip elements that are merely padding in the original GCC type.
      if (gccType && isPaddingElement(gccType, i))
        continue;
      Value *DElPtr = Builder.CreateStructGEP(DestLoc.Ptr, i);
      Value *SElPtr = Builder.CreateStructGEP(SrcLoc.Ptr, i);
      // Element alignment is limited by its offset within the struct.
      unsigned Align = MinAlign(Alignment, SL->getElementOffset(i));
      CopyAggregate(MemRef(DElPtr, Align, DestLoc.Volatile),
                    MemRef(SElPtr, Align, SrcLoc.Volatile),
                    Builder, 0);
    }
  } else {
    const ArrayType *ATy = cast<ArrayType>(ElTy);
    unsigned EltSize = getTargetData().getTypeAllocSize(ATy->getElementType());
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      Value *DElPtr = Builder.CreateStructGEP(DestLoc.Ptr, i);
      Value *SElPtr = Builder.CreateStructGEP(SrcLoc.Ptr, i);
      unsigned Align = MinAlign(Alignment, i * EltSize);
      CopyAggregate(MemRef(DElPtr, Align, DestLoc.Volatile),
                    MemRef(SElPtr, Align, SrcLoc.Volatile),
                    Builder, 0);
    }
  }
}
/// CountAggregateElements - Return the number of scalar (single-value) leaves
/// an element-by-element copy or zero of type Ty would touch.  Used to decide
/// whether an aggregate is small enough to expand inline.
static unsigned CountAggregateElements(const Type *Ty) {
  // A scalar is one element.
  if (Ty->isSingleValueType())
    return 1;

  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    unsigned Total = 0;
    for (unsigned Idx = 0, N = STy->getNumElements(); Idx != N; ++Idx)
      Total += CountAggregateElements(STy->getElementType(Idx));
    return Total;
  }

  // Anything else here must be an array.
  const ArrayType *ATy = cast<ArrayType>(Ty);
  return ATy->getNumElements() * CountAggregateElements(ATy->getElementType());
}
static bool containsFPField(const Type *LLVMTy) {
if (LLVMTy->isFloatingPointTy())
return true;
const StructType* STy = dyn_cast<StructType>(LLVMTy);
if (STy) {
for (StructType::element_iterator I = STy->element_begin(),
E = STy->element_end(); I != E; I++) {
const Type *Ty = *I;
if (Ty->isFloatingPointTy())
return true;
if (Ty->isStructTy() && containsFPField(Ty))
return true;
const ArrayType *ATy = dyn_cast<ArrayType>(Ty);
if (ATy && containsFPField(ATy->getElementType()))
return true;
const VectorType *VTy = dyn_cast<VectorType>(Ty);
if (VTy && containsFPField(VTy->getElementType()))
return true;
}
}
return false;
}
#ifndef TARGET_LLVM_MIN_BYTES_COPY_BY_MEMCPY
#define TARGET_LLVM_MIN_BYTES_COPY_BY_MEMCPY 64
#endif
/// EmitAggregateCopy - Copy the aggregate value SrcLoc into DestLoc.  Small,
/// simple aggregates are expanded into element-wise load/store pairs; larger
/// or tricky ones fall back to llvm.memcpy.
void TreeToLLVM::EmitAggregateCopy(MemRef DestLoc, MemRef SrcLoc, tree type) {
  // Self-copy of a non-volatile location is a no-op.
  if (DestLoc.Ptr == SrcLoc.Ptr && !DestLoc.Volatile && !SrcLoc.Volatile)
    return;

  // If the type is small, and either it is a single scalar or its byte size
  // is below the target's memcpy threshold, try an element-wise copy.
  const Type *LLVMTy = ConvertType(type);
  unsigned NumElts = CountAggregateElements(LLVMTy);
  if (TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
      (NumElts == 1 ||
       TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) <
       TARGET_LLVM_MIN_BYTES_COPY_BY_MEMCPY)) {
    // Element-wise copy is only safe when no FP field could be reinterpreted
    // (unions), when the LLVM type has no padding the GCC type uses, and when
    // the element count is small enough not to bloat the code.
    if ((TREE_CODE(type) != UNION_TYPE || !containsFPField(LLVMTy)) &&
        !TheTypeConverter->GCCTypeOverlapsWithLLVMTypePadding(type, LLVMTy) &&
        NumElts <= 8) {
      DestLoc.Ptr = BitCastToType(DestLoc.Ptr,
                                  LLVMTy->getPointerTo());
      SrcLoc.Ptr = BitCastToType(SrcLoc.Ptr,
                                 LLVMTy->getPointerTo());
      CopyAggregate(DestLoc, SrcLoc, Builder, type);
      return;
    }
  }

  // Fall back: copy the whole thing with llvm.memcpy.
  Value *TypeSize = Emit(TYPE_SIZE_UNIT(type), 0);
  EmitMemCpy(DestLoc.Ptr, SrcLoc.Ptr, TypeSize,
             std::min(DestLoc.getAlignment(), SrcLoc.getAlignment()));
}
/// ZeroAggregate - Recursively store zero into every scalar leaf of the
/// aggregate pointed to by DestLoc, preserving volatility and tracking the
/// provable alignment of each piece.
static void ZeroAggregate(MemRef DestLoc, LLVMBuilder &Builder) {
  const Type *ElTy =
    cast<PointerType>(DestLoc.Ptr->getType())->getElementType();
  if (ElTy->isSingleValueType()) {
    // Scalar leaf: store the type's zero value.
    StoreInst *St = Builder.CreateStore(Constant::getNullValue(ElTy),
                                        DestLoc.Ptr, DestLoc.Volatile);
    St->setAlignment(DestLoc.getAlignment());
  } else if (const StructType *STy = dyn_cast<StructType>(ElTy)) {
    const StructLayout *SL = getTargetData().getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Ptr = Builder.CreateStructGEP(DestLoc.Ptr, i);
      // Element alignment is limited by its offset within the struct.
      unsigned Alignment = MinAlign(DestLoc.getAlignment(),
                                    SL->getElementOffset(i));
      ZeroAggregate(MemRef(Ptr, Alignment, DestLoc.Volatile), Builder);
    }
  } else {
    const ArrayType *ATy = cast<ArrayType>(ElTy);
    unsigned EltSize = getTargetData().getTypeAllocSize(ATy->getElementType());
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      Value *Ptr = Builder.CreateStructGEP(DestLoc.Ptr, i);
      unsigned Alignment = MinAlign(DestLoc.getAlignment(), i * EltSize);
      ZeroAggregate(MemRef(Ptr, Alignment, DestLoc.Volatile), Builder);
    }
  }
}
/// EmitAggregateZero - Zero the aggregate at DestLoc.  Small aggregates
/// (constant size under 128 bytes, at most 8 scalar leaves, no GCC/LLVM
/// padding mismatch) are expanded into explicit zero stores; everything else
/// uses llvm.memset.
void TreeToLLVM::EmitAggregateZero(MemRef DestLoc, tree type) {
  if (TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
      TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) < 128) {
    const Type *LLVMTy = ConvertType(type);
    // Only expand inline when the LLVM type has no padding that the GCC type
    // actually uses, and the element count is small.
    if (!TheTypeConverter->GCCTypeOverlapsWithLLVMTypePadding(type, LLVMTy) &&
        CountAggregateElements(LLVMTy) <= 8) {
      DestLoc.Ptr = BitCastToType(DestLoc.Ptr,
                                  LLVMTy->getPointerTo());
      ZeroAggregate(DestLoc, Builder);
      return;
    }
  }

  // Fall back to memset with a zero byte.
  EmitMemSet(DestLoc.Ptr, ConstantInt::get(Type::getInt8Ty(Context), 0),
             Emit(TYPE_SIZE_UNIT(type), 0), DestLoc.getAlignment());
}
/// EmitMemCpy - Emit a call to llvm.memcpy copying Size bytes from SrcPtr to
/// DestPtr with the given alignment.  Returns the destination cast to i8*.
Value *TreeToLLVM::EmitMemCpy(Value *DestPtr, Value *SrcPtr, Value *Size,
                              unsigned Align) {
  const Type *BytePtrTy = Type::getInt8PtrTy(Context);
  const Type *IntPtrTy = TD.getIntPtrType(Context);
  Value *Args[5];
  Args[0] = BitCastToType(DestPtr, BytePtrTy);
  Args[1] = BitCastToType(SrcPtr, BytePtrTy);
  Args[2] = CastToSIntType(Size, IntPtrTy);
  Args[3] = ConstantInt::get(Type::getInt32Ty(Context), Align);
  Args[4] = ConstantInt::get(Type::getInt1Ty(Context), false); // not volatile
  // The intrinsic is overloaded on dest, src and length types.
  const Type *OverloadTys[3] = { BytePtrTy, BytePtrTy, IntPtrTy };
  Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memcpy,
                                               OverloadTys, 3),
                     Args, Args + 5);
  return Args[0];
}
/// EmitMemMove - Emit a call to llvm.memmove moving Size bytes from SrcPtr to
/// DestPtr (regions may overlap) with the given alignment.  Returns the
/// destination cast to i8*.
Value *TreeToLLVM::EmitMemMove(Value *DestPtr, Value *SrcPtr, Value *Size,
                               unsigned Align) {
  const Type *BytePtrTy = Type::getInt8PtrTy(Context);
  const Type *IntPtrTy = TD.getIntPtrType(Context);
  Value *Args[5];
  Args[0] = BitCastToType(DestPtr, BytePtrTy);
  Args[1] = BitCastToType(SrcPtr, BytePtrTy);
  Args[2] = CastToSIntType(Size, IntPtrTy);
  Args[3] = ConstantInt::get(Type::getInt32Ty(Context), Align);
  Args[4] = ConstantInt::get(Type::getInt1Ty(Context), false); // not volatile
  // The intrinsic is overloaded on dest, src and length types.
  const Type *OverloadTys[3] = { BytePtrTy, BytePtrTy, IntPtrTy };
  Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memmove,
                                               OverloadTys, 3),
                     Args, Args + 5);
  return Args[0];
}
/// EmitMemSet - Emit a call to llvm.memset filling Size bytes at DestPtr with
/// the byte value SrcVal, using the given alignment.  Returns the destination
/// cast to i8*.
Value *TreeToLLVM::EmitMemSet(Value *DestPtr, Value *SrcVal, Value *Size,
                              unsigned Align) {
  const Type *BytePtrTy = Type::getInt8PtrTy(Context);
  const Type *IntPtrTy = TD.getIntPtrType(Context);
  Value *Args[5];
  Args[0] = BitCastToType(DestPtr, BytePtrTy);
  Args[1] = CastToSIntType(SrcVal, Type::getInt8Ty(Context));
  Args[2] = CastToSIntType(Size, IntPtrTy);
  Args[3] = ConstantInt::get(Type::getInt32Ty(Context), Align);
  Args[4] = ConstantInt::get(Type::getInt1Ty(Context), false); // not volatile
  // The intrinsic is overloaded on dest and length types.
  const Type *OverloadTys[2] = { BytePtrTy, IntPtrTy };
  Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::memset,
                                               OverloadTys, 2),
                     Args, Args + 5);
  return Args[0];
}
/// EmitTypeGcroot - Register the stack slot V as a garbage collection root
/// via the llvm.gcroot intrinsic, switching the function to the shadow-stack
/// GC strategy.  (decl is currently unused.)
void TreeToLLVM::EmitTypeGcroot(Value *V, tree decl) {
  // Functions that use llvm.gcroot must have a GC strategy set.
  Fn->setGC("shadow-stack");

  Function *GCRoot = Intrinsic::getDeclaration(TheModule, Intrinsic::gcroot);
  const PointerType *BytePtrTy = Type::getInt8PtrTy(Context);

  // The intrinsic takes an i8** slot and a metadata pointer (none here).
  Value *Slot = Builder.CreateBitCast(V, BytePtrTy->getPointerTo());
  Value *Args[2] = { Slot, ConstantPointerNull::get(BytePtrTy) };
  Builder.CreateCall(GCRoot, Args, Args + 2);
}
/// EmitAnnotateIntrinsic - If decl carries "annotate" attributes, emit one
/// llvm.var.annotation call per annotation string, tagging the value V with
/// the string, the declaring source file and line number.
///
/// Fixes: the loop body re-declared 'const Type *SBP', shadowing the
/// identical outer declaration, and re-emitted the loop-invariant bitcast of
/// V on every iteration; both are now hoisted.
void TreeToLLVM::EmitAnnotateIntrinsic(Value *V, tree decl) {
  // Nothing to do unless the declaration has at least one annotate attribute.
  tree annotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES (decl));
  if (!annotateAttr)
    return;

  Function *annotateFun = Intrinsic::getDeclaration(TheModule,
                                                    Intrinsic::var_annotation);

  // Source location of the declaration, passed to every annotation call.
  Constant *lineNo =
    ConstantInt::get(Type::getInt32Ty(Context), DECL_SOURCE_LINE(decl));
  Constant *file = ConvertMetadataStringToGV(DECL_SOURCE_FILE(decl));
  const Type *SBP = Type::getInt8PtrTy(Context);
  file = Builder.getFolder().CreateBitCast(file, SBP);

  // The annotated pointer is loop-invariant: cast it to i8* once.
  Value *AnnotatedPtr = BitCastToType(V, SBP);

  // A declaration may have several annotate attributes, each with several
  // string arguments; emit one intrinsic call per string.
  while (annotateAttr) {
    tree args = TREE_VALUE(annotateAttr);
    for (tree a = args; a; a = TREE_CHAIN(a)) {
      tree val = TREE_VALUE(a);
      assert(TREE_CODE(val) == STRING_CST &&
             "Annotate attribute arg should always be a string");
      Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
      Value *Ops[4] = {
        AnnotatedPtr,
        BitCastToType(strGV, SBP),
        file,
        lineNo
      };
      Builder.CreateCall(annotateFun, Ops, Ops+4);
    }
    // Look for further annotate attributes later in the attribute chain.
    annotateAttr = TREE_CHAIN(annotateAttr);
    if (annotateAttr)
      annotateAttr = lookup_attribute("annotate", annotateAttr);
  }
}
/// EmitAutomaticVariableDecl - Emit the stack storage (alloca) for a local
/// variable declaration and record it via SET_DECL_LLVM.  Handles fixed-size
/// and variable-size variables, alignment, annotations, GC roots and debug
/// info.
void TreeToLLVM::EmitAutomaticVariableDecl(tree decl) {
  tree type = TREE_TYPE(decl);

  assert(!DECL_LLVM_SET_P(decl) && "Shouldn't call this on an emitted var!");

  // CONST_DECLs get no storage; just propagate layout info from the type.
  if (TREE_CODE(decl) == CONST_DECL) {
    DECL_MODE(decl) = TYPE_MODE(type);
    DECL_ALIGN(decl) = TYPE_ALIGN(type);
    DECL_SIZE(decl) = TYPE_SIZE(type);
    DECL_SIZE_UNIT(decl) = TYPE_SIZE_UNIT(type);
    return;
  }

  // Only automatic VAR_DECLs / RESULT_DECLs get a stack slot here.
  if ((TREE_CODE(decl) != VAR_DECL && TREE_CODE(decl) != RESULT_DECL) ||
      TREE_STATIC(decl) || DECL_EXTERNAL(decl) || type == error_mark_node)
    return;

  // Gimple temporaries are materialized lazily, on first use.
  if (isGimpleTemporary(decl))
    return;

  // Variables with a DECL_VALUE_EXPR are rewritten in terms of another
  // expression; they need no storage of their own.
  if (TREE_CODE(decl) == VAR_DECL && DECL_VALUE_EXPR(decl))
    return;

  const Type *Ty;          // Type to allocate.
  Value *Size = 0;         // Element count if dynamically sized, 0 otherwise.

  if (DECL_SIZE(decl) == 0) { // Variable with incomplete type.
    if (DECL_INITIAL(decl) == 0)
      return; // Error message was already done; now avoid a crash.
    else {
      // "An initializer is going to decide the size of this array."
      TODO(decl);
      abort();
    }
  } else if (TREE_CODE(DECL_SIZE_UNIT(decl)) == INTEGER_CST) {
    // Fixed-size variable: allocate its converted type directly.
    Ty = ConvertType(type);
  } else {
    // Dynamic-size object: alloca a count of elements at runtime.
    if (TREE_CODE(type) == ARRAY_TYPE
        && isSequentialCompatible(type)
        && TYPE_SIZE(type) == DECL_SIZE(decl)) {
      // Variable-length array: alloca (total bits / element bits) elements
      // of the element type.
      Ty = ConvertType(TREE_TYPE(type));
      Size = Emit(DECL_SIZE(decl), 0);
      assert(!integer_zerop(TYPE_SIZE(TREE_TYPE(type)))
             && "Array of positive size with elements of zero size!");
      Value *EltSize = Emit(TYPE_SIZE(TREE_TYPE(type)), 0);
      Size = Builder.CreateUDiv(Size, EltSize, "len");
    } else {
      // Anything else dynamically sized: alloca that many i8 bytes.
      Size = Emit(DECL_SIZE_UNIT(decl), 0);
      Ty = Type::getInt8Ty(Context);
    }
    Size = CastToUIntType(Size, Type::getInt32Ty(Context));
  }

  // Honor an explicit alignment only when the user asked for it or it exceeds
  // the target's natural ABI alignment; 0 means "use the default".
  unsigned Alignment = 0;
  if (DECL_ALIGN(decl)) {
    unsigned TargetAlign = getTargetData().getABITypeAlignment(Ty);
    if (DECL_USER_ALIGN(decl) || 8 * TargetAlign < (unsigned)DECL_ALIGN(decl))
      Alignment = DECL_ALIGN(decl) / 8; // bits -> bytes
  }

  // Pick a name for the alloca.
  const char *Name;
  if (DECL_NAME(decl))
    Name = IDENTIFIER_POINTER(DECL_NAME(decl));
  else if (TREE_CODE(decl) == RESULT_DECL)
    Name = "retval";
  else
    Name = "";

  AllocaInst *AI;
  if (!Size) {
    // Fixed size: keep the alloca in the entry block.
    AI = CreateTemporary(Ty);
    AI->setName(Name);
  } else {
    // Dynamic size: must alloca at the current point.
    AI = Builder.CreateAlloca(Ty, Size, Name);
  }
  AI->setAlignment(Alignment);

  SET_DECL_LLVM(decl, AI);

  if (DECL_ATTRIBUTES(decl))
    EmitAnnotateIntrinsic(AI, decl);

  // Pointers marked "gcroot" become GC roots, initialized to null.
  if (POINTER_TYPE_P(TREE_TYPE (decl))
      && lookup_attribute("gcroot", TYPE_ATTRIBUTES(TREE_TYPE (decl))))
    {
      const Type *T = cast<PointerType>(AI->getType())->getElementType();
      EmitTypeGcroot(AI, decl);
      Builder.CreateStore(Constant::getNullValue(T), AI);
    }

  if (EmitDebugInfo()) {
    if (DECL_NAME(decl)) {
      TheDebugInfo->EmitDeclare(decl, dwarf::DW_TAG_auto_variable,
                                Name, TREE_TYPE(decl), AI,
                                Builder);
    } else if (TREE_CODE(decl) == RESULT_DECL) {
      TheDebugInfo->EmitDeclare(decl, dwarf::DW_TAG_return_variable,
                                Name, TREE_TYPE(decl), AI,
                                Builder);
    }
  }
}
/// EmitLABEL_EXPR - A label: start (or continue into) the basic block
/// associated with the LABEL_DECL.  Produces no value.
Value *TreeToLLVM::EmitLABEL_EXPR(tree exp) {
  EmitBlock(getLabelDeclBlock(TREE_OPERAND(exp, 0)));
  return 0;
}
/// EmitGOTO_EXPR - Emit a direct branch for "goto label", or an indirect
/// branch (with all possible successor labels listed) for a computed goto.
/// Produces no value.
Value *TreeToLLVM::EmitGOTO_EXPR(tree exp) {
  tree dest = GOTO_DESTINATION(exp);
  if (TREE_CODE(dest) == LABEL_DECL) {
    // Direct branch to a known label.
    Builder.CreateBr(getLabelDeclBlock(dest));
  } else {
    // Computed goto: the destination address is a runtime value.  List every
    // CFG successor of this statement's basic block as a possible target.
    basic_block bb = bb_for_stmt(exp);
    Value *V = Emit(dest, 0);
    IndirectBrInst *Br = Builder.CreateIndirectBr(V, EDGE_COUNT(bb->succs));
    edge e;
    edge_iterator ei;
    FOR_EACH_EDGE (e, ei, bb->succs)
      Br->addDestination(getLabelDeclBlock(tree_block_label(e->dest)));
  }
  // Subsequent code is unreachable; give it a fresh (unnamed) block.
  EmitBlock(BasicBlock::Create(Context, ""));
  return 0;
}
/// EmitRETURN_EXPR - Emit a return: evaluate any assignment into the
/// RESULT_DECL, then branch to the shared return block.  Produces no value.
Value *TreeToLLVM::EmitRETURN_EXPR(tree exp, const MemRef *DestLoc) {
  assert(DestLoc == 0 && "Does not return a value!");

  tree retval = TREE_OPERAND(exp, 0);
  // In gimple form the operand is empty, the RESULT_DECL itself, or a
  // MODIFY/INIT_EXPR storing into the RESULT_DECL.
  assert((!retval || TREE_CODE(retval) == RESULT_DECL ||
          ((TREE_CODE(retval) == MODIFY_EXPR
            || TREE_CODE(retval) == INIT_EXPR) &&
           TREE_CODE(TREE_OPERAND(retval, 0)) == RESULT_DECL)) &&
         "RETURN_EXPR not gimple!");

  // Evaluate the assignment into the result variable, if any.
  if (retval && TREE_CODE(retval) != RESULT_DECL)
    Emit(retval, 0);

  // All returns funnel through the shared return block.
  Builder.CreateBr(ReturnBB);
  // Code after the return is unreachable; start a fresh block for it.
  EmitBlock(BasicBlock::Create(Context, ""));
  return 0;
}
/// EmitCOND_EXPR - Emit a gimple conditional branch.  The condition is
/// either a comparison (emitted directly as icmp/fcmp with the predicate
/// chosen by operand signedness/floatness) or an arbitrary value tested
/// against zero.  Both arms must be GOTO_EXPRs.  Produces no value.
Value *TreeToLLVM::EmitCOND_EXPR(tree exp) {
  tree exp_cond = COND_EXPR_COND(exp);

  // Map the GCC comparison code to the unsigned-int, signed-int and
  // floating-point LLVM predicates.  FPPred == ~0 flags "not a comparison".
  unsigned UIPred = 0, SIPred = 0, FPPred = ~0;
  Value *Cond;
  switch (TREE_CODE(exp_cond)) {
  default: break;
  case LT_EXPR:
    UIPred = ICmpInst::ICMP_ULT;
    SIPred = ICmpInst::ICMP_SLT;
    FPPred = FCmpInst::FCMP_OLT;
    break;
  case LE_EXPR:
    UIPred = ICmpInst::ICMP_ULE;
    SIPred = ICmpInst::ICMP_SLE;
    FPPred = FCmpInst::FCMP_OLE;
    break;
  case GT_EXPR:
    UIPred = ICmpInst::ICMP_UGT;
    SIPred = ICmpInst::ICMP_SGT;
    FPPred = FCmpInst::FCMP_OGT;
    break;
  case GE_EXPR:
    UIPred = ICmpInst::ICMP_UGE;
    SIPred = ICmpInst::ICMP_SGE;
    FPPred = FCmpInst::FCMP_OGE;
    break;
  case EQ_EXPR:
    UIPred = SIPred = ICmpInst::ICMP_EQ;
    FPPred = FCmpInst::FCMP_OEQ;
    break;
  case NE_EXPR:
    UIPred = SIPred = ICmpInst::ICMP_NE;
    FPPred = FCmpInst::FCMP_UNE;
    break;
  // The UN* codes are floating-point-only "unordered" comparisons.
  case UNORDERED_EXPR: FPPred = FCmpInst::FCMP_UNO; break;
  case ORDERED_EXPR: FPPred = FCmpInst::FCMP_ORD; break;
  case UNLT_EXPR: FPPred = FCmpInst::FCMP_ULT; break;
  case UNLE_EXPR: FPPred = FCmpInst::FCMP_ULE; break;
  case UNGT_EXPR: FPPred = FCmpInst::FCMP_UGT; break;
  case UNGE_EXPR: FPPred = FCmpInst::FCMP_UGE; break;
  case UNEQ_EXPR: FPPred = FCmpInst::FCMP_UEQ; break;
  case LTGT_EXPR: FPPred = FCmpInst::FCMP_ONE; break;
  }

  if (FPPred == ~0U) {
    // Not a comparison: evaluate and test the value against zero.
    Cond = Emit(exp_cond, 0);
    if (!Cond->getType()->isIntegerTy(1))
      Cond = Builder.CreateIsNotNull(Cond, "toBool");
  } else {
    // Comparison: emit as an i1-producing icmp/fcmp.
    Cond = EmitCompare(exp_cond, UIPred, SIPred, FPPred, Type::getInt1Ty(Context));
    assert(Cond->getType() == Type::getInt1Ty(Context));
  }

  // In gimple form both arms are plain gotos; branch to their labels.
  tree Then = COND_EXPR_THEN(exp);
  tree Else = COND_EXPR_ELSE(exp);
  assert(TREE_CODE(Then) == GOTO_EXPR && TREE_CODE(Else) == GOTO_EXPR
         && "Not a gimple if?");

  BasicBlock *ThenDest = getLabelDeclBlock(TREE_OPERAND(Then, 0));
  BasicBlock *ElseDest = getLabelDeclBlock(TREE_OPERAND(Else, 0));
  Builder.CreateCondBr(Cond, ThenDest, ElseDest);
  // Code after the branch is unreachable; start a fresh block.
  EmitBlock(BasicBlock::Create(Context, ""));
  return 0;
}
/// EmitSWITCH_EXPR - Emit a gimple switch as an LLVM switch instruction.
/// Small case ranges (fewer than 64 values) are expanded into individual
/// cases; large ranges become an explicit range test (sub + icmp ule) in
/// front of the switch's fall-through path.  Produces no value.
Value *TreeToLLVM::EmitSWITCH_EXPR(tree exp) {
  tree Cases = SWITCH_LABELS(exp);

  // Emit the condition.
  Value *SwitchExp = Emit(SWITCH_COND(exp), 0);
  bool ExpIsSigned = !TYPE_UNSIGNED(TREE_TYPE(SWITCH_COND(exp)));

  // Emit the switch instruction; the default successor is fixed up below.
  SwitchInst *SI = Builder.CreateSwitch(SwitchExp, Builder.GetInsertBlock(),
                                        TREE_VEC_LENGTH(Cases));
  EmitBlock(BasicBlock::Create(Context, ""));
  // Default location starts out as the fall-through block so that range
  // tests emitted below chain from it.
  SI->setSuccessor(0, Builder.GetInsertBlock());

  assert(!SWITCH_BODY(exp) && "not a gimple switch?");

  BasicBlock *DefaultDest = NULL;
  for (unsigned i = 0, e = TREE_VEC_LENGTH(Cases); i != e; ++i) {
    BasicBlock *Dest = getLabelDeclBlock(CASE_LABEL(TREE_VEC_ELT(Cases, i)));

    // A case with no CASE_LOW is the default.
    tree low = CASE_LOW(TREE_VEC_ELT(Cases, i));
    if (!low) {
      DefaultDest = Dest;
      continue;
    }

    // Convert the case value to the switch condition's type/signedness.
    Value *Val = Emit(low, 0);
    Val = CastToAnyType(Val, !TYPE_UNSIGNED(TREE_TYPE(low)),
                        SwitchExp->getType(), ExpIsSigned);
    ConstantInt *LowC = cast<ConstantInt>(Val);

    // No CASE_HIGH means a single-value case.
    tree high = CASE_HIGH(TREE_VEC_ELT(Cases, i));
    if (!high) {
      SI->addCase(LowC, Dest);
      continue;
    }
    Val = Emit(high, 0);
    Val = CastToAnyType(Val, !TYPE_UNSIGNED(TREE_TYPE(high)),
                        SwitchExp->getType(), ExpIsSigned);
    ConstantInt *HighC = cast<ConstantInt>(Val);

    APInt Range = HighC->getValue() - LowC->getValue();
    if (Range.ult(APInt(Range.getBitWidth(), 64))) {
      // Small range: add every value in [low, high] as its own case.
      APInt CurrentValue = LowC->getValue();
      while (1) {
        SI->addCase(LowC, Dest);
        if (LowC == HighC) break; // Emitted the last one.
        CurrentValue++;
        LowC = ConstantInt::get(Context, CurrentValue);
      }
    } else {
      // Large range: test (cond - low) <=u (high - low) explicitly, chaining
      // the failure path so further range tests / the default follow it.
      Value *Diff = Builder.CreateSub(SwitchExp, LowC);
      Value *Cond = Builder.CreateICmpULE(Diff,
                                          ConstantInt::get(Context, Range));
      BasicBlock *False_Block = BasicBlock::Create(Context, "case_false");
      Builder.CreateCondBr(Cond, Dest, False_Block);
      EmitBlock(False_Block);
    }
  }

  if (DefaultDest) {
    if (SI->getSuccessor(0) == Builder.GetInsertBlock())
      // No range tests were emitted: point the switch default directly there.
      SI->setSuccessor(0, DefaultDest);
    else {
      // Range tests fall through to here; branch on to the real default.
      Builder.CreateBr(DefaultDest);
      // Emit a "fallthrough" block.
      EmitBlock(BasicBlock::Create(Context, ""));
    }
  }

  return 0;
}
/// CreateExceptionValues - Lazily create the temporaries that hold the
/// current exception pointer and selector, and cache the EH intrinsic
/// declarations.  Idempotent: a second call is a no-op.
void TreeToLLVM::CreateExceptionValues() {
  // Check to see if the exception values have been constructed.
  if (ExceptionValue) return;

  // Slot for the i8* exception pointer returned by llvm.eh.exception.
  ExceptionValue = CreateTemporary(Type::getInt8PtrTy(Context));
  ExceptionValue->setName("eh_exception");

  // Slot for the i32 selector returned by llvm.eh.selector.
  ExceptionSelectorValue = CreateTemporary(Type::getInt32Ty(Context));
  ExceptionSelectorValue->setName("eh_selector");

  FuncEHException = Intrinsic::getDeclaration(TheModule,
                                              Intrinsic::eh_exception);
  FuncEHSelector = Intrinsic::getDeclaration(TheModule,
                                             Intrinsic::eh_selector);
  FuncEHGetTypeID = Intrinsic::getDeclaration(TheModule,
                                              Intrinsic::eh_typeid_for);
}
/// getPostPad - Return the post-landing-pad basic block for EH region
/// RegionNo, creating (and caching) it on first request.
BasicBlock *TreeToLLVM::getPostPad(unsigned RegionNo) {
  // Make sure the cache is large enough to index by RegionNo.
  PostPads.grow(RegionNo);
  BasicBlock *&Slot = PostPads[RegionNo];
  if (Slot == 0)
    Slot = BasicBlock::Create(Context, "ppad");
  return Slot;
}
/// AddHandler - Callback for foreach_reachable_handler: append each reachable
/// handler region to the vector passed through the opaque 'data' pointer.
static void AddHandler (struct eh_region *region, void *data) {
  std::vector<struct eh_region *> *Handlers =
    static_cast<std::vector<struct eh_region *> *>(data);
  Handlers->push_back(region);
}
/// EmitLandingPads - Emit the body of every landing pad created during
/// function emission.  Each pad captures the exception pointer, then calls
/// llvm.eh.selector with the personality function and an argument list
/// describing the reachable handlers: filters push a length followed by
/// their type infos, catches push their type infos (or a catch-all value),
/// and cleanups may append a trailing 0 / catch-all marker.  Finally the pad
/// stores the selector and branches to the first handler's post-pad.
void TreeToLLVM::EmitLandingPads() {
  std::vector<Value*> Args;
  std::vector<struct eh_region *> Handlers;

  // Region 0 is unused, hence i starts at 1.
  for (unsigned i = 1; i < LandingPads.size(); ++i) {
    BasicBlock *LandingPad = LandingPads[i];
    if (!LandingPad)
      continue;

    CreateExceptionValues();

    EmitBlock(LandingPad);

    // Fetch and store the exception pointer.
    Value *Ex = Builder.CreateCall(FuncEHException, "eh_ptr");
    Builder.CreateStore(Ex, ExceptionValue);

    // llvm.eh.selector arg 1: the exception pointer.
    Args.push_back(Builder.CreateLoad(ExceptionValue, "eh_ptr"));

    // llvm.eh.selector arg 2: the personality function.
    assert(llvm_eh_personality_libfunc
           && "no exception handling personality function!");
    Args.push_back(BitCastToType(DECL_LLVM(llvm_eh_personality_libfunc),
                                 Type::getInt8PtrTy(Context)));

    // Collect every handler reachable from this region.
    foreach_reachable_handler(i, false, AddHandler, &Handlers);

    bool HasCleanup = false;
    bool HasCatchAll = false;
    // Cached across landing pads: the catch-all sentinel global.
    static GlobalVariable *CatchAll = 0;

    for (std::vector<struct eh_region *>::iterator I = Handlers.begin(),
         E = Handlers.end(); I != E; ++I) {
      struct eh_region *region = *I;

      // Ensure the post-pad for this handler exists (created lazily).
      getPostPad(get_eh_region_number(region));

      int RegionKind = classify_eh_handler(region);
      if (RegionKind < 0) {
        // Filter: push the length (+1 for the trailing 0?) then each type.
        tree TypeList = get_eh_type_list(region);
        unsigned Length = list_length(TypeList);
        Args.reserve(Args.size() + Length + 1);
        Args.push_back(ConstantInt::get(Type::getInt32Ty(Context), Length + 1));

        for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
          tree TType = lookup_type_for_runtime(TREE_VALUE(TypeList));
          Args.push_back(Emit(TType, 0));
        }
      } else if (RegionKind > 0) {
        // Catch handler.
        tree TypeList = get_eh_type_list(region);
        if (!TypeList) {
          // catch-all: represented by a distinguished null-initialized global.
          if (!CatchAll) {
            Constant *Init =
              Constant::getNullValue(Type::getInt8PtrTy(Context));
            CatchAll = new GlobalVariable(*TheModule, Init->getType(), true,
                                          GlobalVariable::LinkOnceAnyLinkage,
                                          Init, "llvm.eh.catch.all.value");
            CatchAll->setSection("llvm.metadata");
          }
          Args.push_back(CatchAll);
          HasCatchAll = true;
        } else {
          // Push the type info for each caught type.
          for (; TypeList; TypeList = TREE_CHAIN(TypeList)) {
            tree TType = lookup_type_for_runtime(TREE_VALUE(TypeList));
            Args.push_back(Emit(TType, 0));
          }
        }
      } else {
        // Cleanup region (neither filter nor catch).
        HasCleanup = true;
      }
    }

    if (can_throw_external_1(i, false)) {
      if (HasCleanup) {
        // Args.size() == 2 means only the exception pointer and personality
        // were pushed: a pure-cleanup selector gets a trailing 0.
        if (Args.size() == 2 || USING_SJLJ_EXCEPTIONS || !lang_eh_catch_all) {
          Args.push_back(ConstantInt::get(Type::getInt32Ty(Context), 0));
        } else if (!HasCatchAll) {
          // Otherwise append the language's catch-all value so the unwinder
          // stops here rather than continuing past the cleanup.
          if (!CatchAll) {
            Constant *Init = 0;
            tree catch_all_type = lang_eh_catch_all();
            if (catch_all_type == NULL_TREE)
              Init = Constant::getNullValue(Type::getInt8PtrTy(Context));
            else
              Init = cast<Constant>(Emit(catch_all_type, 0));
            CatchAll = new GlobalVariable(*TheModule, Init->getType(), true,
                                          GlobalVariable::LinkOnceAnyLinkage,
                                          Init, "llvm.eh.catch.all.value");
            CatchAll->setSection("llvm.metadata");
          }
          Args.push_back(CatchAll);
        }
      }
    }

    // Emit the llvm.eh.selector call and remember its result.
    Value *Select = Builder.CreateCall(FuncEHSelector, Args.begin(), Args.end(),
                                       "eh_select");
    Builder.CreateStore(Select, ExceptionSelectorValue);

    // Branch to the first handler's post-pad; post-pads chain to the rest.
    assert(!Handlers.empty() && "Landing pad but no handler?");
    Builder.CreateBr(getPostPad(get_eh_region_number(*Handlers.begin())));

    Handlers.clear();
    Args.clear();
  }
}
/// EmitPostPads - Emit the body of every post-landing-pad block.  A post-pad
/// inspects the stored exception selector to decide whether its region's
/// handler applies: cleanups branch unconditionally, filters test for a
/// negative selector, and catches compare the selector against each caught
/// type's llvm.eh.typeid.for value.  On mismatch, control chains to the next
/// applicable post-pad, or to the unwind block if none exists.
void TreeToLLVM::EmitPostPads() {
  std::vector<struct eh_region *> Handlers;

  // Region 0 is unused, hence i starts at 1.
  for (unsigned i = 1; i < PostPads.size(); ++i) {
    BasicBlock *PostPad = PostPads[i];
    if (!PostPad)
      continue;

    CreateExceptionValues();

    EmitBlock(PostPad);

    struct eh_region *region = get_eh_region(i);
    BasicBlock *Dest = getLabelDeclBlock(get_eh_region_tree_label(region));

    int RegionKind = classify_eh_handler(region);
    if (!RegionKind || !get_eh_type_list(region)) {
      // Cleanup, or catch-all: always enter the handler.
      Builder.CreateBr(Dest);
      continue;
    } else if (RegionKind < 0) {
      // Filter: a negative selector means the filter matched.
      Value *Select = Builder.CreateLoad(ExceptionSelectorValue);
      Value *Zero = ConstantInt::get(Select->getType(), 0);
      Value *Compare = Builder.CreateICmpSLT(Select, Zero);
      BasicBlock *NoFilterBB = BasicBlock::Create(Context, "nofilter");
      Builder.CreateCondBr(Compare, Dest, NoFilterBB);
      EmitBlock(NoFilterBB);
    } else if (RegionKind > 0) {
      // Catch: the selector matches if it equals any caught type's id.
      tree TypeList = get_eh_type_list(region);

      Value *Cond = NULL;
      for (; TypeList; TypeList = TREE_CHAIN (TypeList)) {
        Value *TType = Emit(lookup_type_for_runtime(TREE_VALUE(TypeList)), 0);
        TType = BitCastToType(TType,
                              Type::getInt8PtrTy(Context));

        // Compare the selector against this type's runtime id.
        Value *TypeID = Builder.CreateCall(FuncEHGetTypeID, TType, "eh_typeid");
        Value *Select = Builder.CreateLoad(ExceptionSelectorValue);
        Value *Compare = Builder.CreateICmpEQ(Select, TypeID);
        // Or together the comparisons for all caught types.
        Cond = Cond ? Builder.CreateOr(Cond, Compare) : Compare;
      }

      // On mismatch go to the next catch in the chain, if one has a post-pad.
      BasicBlock *NoCatchBB = NULL;
      struct eh_region *next_catch = get_eh_next_catch(region);
      for (; next_catch; next_catch = get_eh_next_catch(next_catch)) {
        unsigned CatchNo = get_eh_region_number(next_catch);
        if (CatchNo < PostPads.size())
          NoCatchBB = PostPads[CatchNo];
        if (NoCatchBB)
          break;
      }
      if (NoCatchBB) {
        Builder.CreateCondBr(Cond, Dest, NoCatchBB);
        continue;
      }
      // No later catch: fall through to the outer-handler search below.
      NoCatchBB = BasicBlock::Create(Context, "nocatch");
      Builder.CreateCondBr(Cond, Dest, NoCatchBB);
      EmitBlock(NoCatchBB);
    }

    // Handler didn't match: find the post-pad of an enclosing handler.
    foreach_reachable_handler(i, true, AddHandler, &Handlers);

    BasicBlock *TargetBB = NULL;
    for (std::vector<struct eh_region *>::iterator I = Handlers.begin(),
         E = Handlers.end(); I != E; ++I) {
      unsigned UnwindNo = get_eh_region_number(*I);
      if (UnwindNo < PostPads.size())
        TargetBB = PostPads[UnwindNo];
      if (TargetBB)
        break;
    }

    if (TargetBB) {
      Builder.CreateBr(TargetBB);
    } else {
      // No enclosing handler in this function: resume unwinding.
      assert(can_throw_external_1(i, true) &&
             "Must-not-throw region handled by runtime?");
      if (!UnwindBB)
        UnwindBB = BasicBlock::Create(Context, "Unwind");
      Builder.CreateBr(UnwindBB);
    }

    Handlers.clear();
  }
}
/// EmitUnwindBlock - If any post-pad needed to continue unwinding out of the
/// function, emit the shared unwind block: reload the exception pointer and
/// call _Unwind_Resume (honoring any target calling-convention adjustment),
/// then mark the block unreachable.
void TreeToLLVM::EmitUnwindBlock() {
  if (UnwindBB) {
    CreateExceptionValues();
    EmitBlock(UnwindBB);
    // Fetch and store exception handler.
    Value *Arg = Builder.CreateLoad(ExceptionValue, "eh_ptr");
    assert(llvm_unwind_resume_libfunc && "no unwind resume function!");

#ifdef TARGET_ADJUST_LLVM_CC
    // Some targets change the calling convention of the resume libfunc.
    tree fntype = TREE_TYPE(llvm_unwind_resume_libfunc);
    CallingConv::ID CallingConvention = CallingConv::C;

    TARGET_ADJUST_LLVM_CC(CallingConvention, fntype);
    CallInst *Call = Builder.CreateCall(DECL_LLVM(llvm_unwind_resume_libfunc),
                                        Arg);
    Call->setCallingConv(CallingConvention);
#else
    Builder.CreateCall(DECL_LLVM(llvm_unwind_resume_libfunc), Arg);
#endif
    // The resume call never returns.
    Builder.CreateUnreachable();
  }
}
/// canEmitLocalRegisterVariable - Return true for a function-local variable
/// declared 'register' with an explicit register name (asm("...")), which is
/// emitted via register read/write moves rather than ordinary storage.
static bool canEmitLocalRegisterVariable(tree exp) {
  // Only register-qualified VAR_DECLs qualify at all.
  if (TREE_CODE(exp) != VAR_DECL || !DECL_REGISTER(exp))
    return false;
  // Globals and asm-block registers are handled elsewhere.
  if (TREE_STATIC(exp) || DECL_EXTERNAL(exp) || TREE_PUBLIC(exp) ||
      DECL_ASM_BLOCK_REGISTER(exp))
    return false;
  // Must have an explicit assembler (register) name.
  return DECL_ASSEMBLER_NAME_SET_P(exp) ? true : false;
}
/// canEmitGlobalRegisterVariable - Return true for a register-qualified
/// VAR_DECL that is global-like (static, external, public) or belongs to an
/// asm block; such variables are read/written as named registers.
static bool canEmitGlobalRegisterVariable(tree exp) {
  // Only register-qualified VAR_DECLs qualify at all.
  if (TREE_CODE(exp) != VAR_DECL || !DECL_REGISTER(exp))
    return false;
  return DECL_ASM_BLOCK_REGISTER(exp) ||
         TREE_STATIC(exp) || DECL_EXTERNAL(exp) || TREE_PUBLIC(exp) ?
         true : false;
}
/// EmitLoadOfLValue - Load the rvalue of the lvalue expression 'exp'.
/// Scalars load directly (or via a register read for register variables),
/// aggregates are copied into DestLoc, and bitfields are assembled from one
/// or two loads with shifting/masking, honoring endianness and signedness.
Value *TreeToLLVM::EmitLoadOfLValue(tree exp, const MemRef *DestLoc) {
  // Gimple temporaries are mapped directly to an SSA value; materialize the
  // backing decl lazily on first use.
  if (isGimpleTemporary(exp)) {
    if (DECL_LLVM_SET_P(exp))
      return DECL_LLVM(exp);
    // Since basic blocks are output in no particular order, it is perfectly
    // possible to encounter a use before the definition was emitted; demote
    // the temporary to an ordinary variable.
    DECL_GIMPLE_FORMAL_TEMP_P(exp) = 0;
    EmitAutomaticVariableDecl(exp);
  } else if (canEmitGlobalRegisterVariable(exp)) {
    // Global register variables are read straight from their register.
    return EmitReadOfRegisterVariable(exp, DestLoc);
  }

  LValue LV = EmitLV(exp);
  bool isVolatile = TREE_THIS_VOLATILE(exp);
  const Type *Ty = ConvertType(TREE_TYPE(exp));
  unsigned Alignment = LV.getAlignment();
  // Fields of packed structs may be under-aligned; be conservative.
  if (TREE_CODE(exp) == COMPONENT_REF)
    if (const StructType *STy =
        dyn_cast<StructType>(ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)))))
      if (STy->isPacked())
        Alignment = 1;

  if (!LV.isBitfield()) {
    if (!DestLoc) {
      // Scalar value: emit a load.
      Value *Ptr = BitCastToType(LV.Ptr, Ty->getPointerTo());
      LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
      LI->setAlignment(Alignment);
      if (canEmitLocalRegisterVariable(exp)) {
        // Local register variable: move the loaded value into its register.
        return EmitMoveOfRegVariableToRightReg(LI, exp);
      } else
        return LI;
    } else {
      // Aggregate value: copy into the destination location.
      EmitAggregateCopy(*DestLoc, MemRef(LV.Ptr, Alignment, isVolatile),
                        TREE_TYPE(exp));
      return 0;
    }
  } else {
    // Zero-width bitfield: the value is just zero.
    if (!LV.BitSize)
      return Constant::getNullValue(Ty);

    const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
    unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();

    // The bitfield may straddle two loadable units ("strides").
    unsigned Strides = 1 + (LV.BitStart + LV.BitSize - 1) / ValSizeInBits;

    assert(ValTy->isIntegerTy() && "Invalid bitfield lvalue!");
    assert(ValSizeInBits > LV.BitStart && "Bad bitfield lvalue!");
    assert(ValSizeInBits >= LV.BitSize && "Bad bitfield lvalue!");
    assert(2*ValSizeInBits > LV.BitSize+LV.BitStart && "Bad bitfield lvalue!");

    Value *Result = NULL;

    // Process the strides in significance order (most significant first on
    // big-endian targets) so the pieces can be shifted/or-ed together.
    for (unsigned I = 0; I < Strides; I++) {
      unsigned Index = BYTES_BIG_ENDIAN ? I : Strides - I - 1;
      // The bit range this stride contributes.
      unsigned ThisFirstBit = Index * ValSizeInBits;
      unsigned ThisLastBitPlusOne = ThisFirstBit + ValSizeInBits;
      if (ThisFirstBit < LV.BitStart)
        ThisFirstBit = LV.BitStart;
      if (ThisLastBitPlusOne > LV.BitStart+LV.BitSize)
        ThisLastBitPlusOne = LV.BitStart+LV.BitSize;

      // Load the stride's word.
      Value *Ptr = Index ?
        Builder.CreateGEP(LV.Ptr,
                          ConstantInt::get(Type::getInt32Ty(Context), Index)) :
        LV.Ptr;
      LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
      LI->setAlignment(Alignment);
      Value *Val = LI;

      unsigned BitsInVal = ThisLastBitPlusOne - ThisFirstBit;
      unsigned FirstBitInVal = ThisFirstBit % ValSizeInBits;

      if (BYTES_BIG_ENDIAN)
        FirstBitInVal = ValSizeInBits-FirstBitInVal-BitsInVal;

      // Shift the relevant bits up against the top of the word...
      if (FirstBitInVal+BitsInVal != ValSizeInBits) {
        Value *ShAmt = ConstantInt::get(ValTy, ValSizeInBits -
                                        (FirstBitInVal+BitsInVal));
        Val = Builder.CreateShl(Val, ShAmt);
      }

      // ...then shift them back down, sign-extending only on the first
      // (most significant) piece of a signed bitfield.
      if (ValSizeInBits != BitsInVal) {
        bool AddSignBits = !TYPE_UNSIGNED(TREE_TYPE(exp)) && !Result;
        Value *ShAmt = ConstantInt::get(ValTy, ValSizeInBits-BitsInVal);
        Val = AddSignBits ?
          Builder.CreateAShr(Val, ShAmt) : Builder.CreateLShr(Val, ShAmt);
      }

      if (Result) {
        // Make room for the new bits and merge them in.
        Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
        Result = Builder.CreateShl(Result, ShAmt);
        Result = Builder.CreateOr(Result, Val);
      } else {
        Result = Val;
      }
    }

    // Convert the assembled word to the bitfield's declared type.
    if (TYPE_UNSIGNED(TREE_TYPE(exp)))
      return CastToUIntType(Result, Ty);
    else
      return CastToSIntType(Result, Ty);
  }
}
/// EmitADDR_EXPR - Take the address of the operand, returning it as the
/// pointer type GCC assigned to the expression.
Value *TreeToLLVM::EmitADDR_EXPR(tree exp) {
  LValue LV = EmitLV(TREE_OPERAND(exp, 0));
  assert((!LV.isBitfield() || LV.BitStart == 0) &&
         "It is illegal to take the address of a bitfield");
  // Perform a cast here if necessary.  For example, GCC sometimes forms an
  // ADDR_EXPR where the operand's LLVM pointer type differs from the
  // expression's converted type.
  return BitCastToType(LV.Ptr, ConvertType(TREE_TYPE(exp)));
}
/// EmitOBJ_TYPE_REF - Evaluate the underlying expression of a virtual-call
/// reference and cast the result to the expression's converted type.
Value *TreeToLLVM::EmitOBJ_TYPE_REF(tree exp) {
  return BitCastToType(Emit(OBJ_TYPE_REF_EXPR(exp), 0),
                       ConvertType(TREE_TYPE(exp)));
}
/// EmitCALL_EXPR - Emit a function call.  Recognized builtins are expanded
/// inline; otherwise the callee is evaluated, cast to the ABI-converted
/// function pointer type, and called.  Calls to noreturn ("volatile")
/// functions are followed by an unreachable terminator.
Value *TreeToLLVM::EmitCALL_EXPR(tree exp, const MemRef *DestLoc) {
  // Builtins (except frontend-only ones) may expand to inline code.
  tree fndecl = get_callee_fndecl(exp);
  if (fndecl && DECL_BUILT_IN(fndecl) &&
      DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_FRONTEND) {
    Value *Res = 0;
    if (EmitBuiltinCall(exp, fndecl, DestLoc, Res))
      return Res;
  }

  Value *Callee = Emit(TREE_OPERAND(exp, 0), 0);

  assert(TREE_TYPE (TREE_OPERAND (exp, 0)) &&
         (TREE_CODE(TREE_TYPE (TREE_OPERAND (exp, 0))) == POINTER_TYPE ||
          TREE_CODE(TREE_TYPE (TREE_OPERAND (exp, 0))) == REFERENCE_TYPE ||
          TREE_CODE(TREE_TYPE (TREE_OPERAND (exp, 0))) == BLOCK_POINTER_TYPE)
         && "Not calling a function pointer?");

  // Determine the ABI-adjusted function type, calling convention and
  // parameter attributes for this call.
  tree function_type = TREE_TYPE(TREE_TYPE (TREE_OPERAND (exp, 0)));
  CallingConv::ID CallingConv;
  AttrListPtr PAL;

  const Type *Ty = TheTypeConverter->ConvertFunctionType(function_type,
                                                         fndecl,
                                                         TREE_OPERAND(exp, 2),
                                                         CallingConv, PAL);

  // The callee's declared type may differ from the ABI-converted type.
  Callee = BitCastToType(Callee, Ty->getPointerTo());

  Value *Result = EmitCallOf(Callee, exp, DestLoc, PAL);

  // TREE_THIS_VOLATILE on a fndecl marks a noreturn function.
  if (fndecl && TREE_THIS_VOLATILE(fndecl)) {
    // Special case: under the ObjC 1 ABI, objc_exception_throw can in fact
    // return, so strip the NoReturn attribute instead.
    if (flag_objc_abi == 1 &&
        Callee->getName() == "objc_exception_throw")
      cast<Function>(Callee)->removeFnAttr(Attribute::NoReturn);
    else {
      Builder.CreateUnreachable();
      EmitBlock(BasicBlock::Create(Context, ""));
    }
  }
  return Result;
}
/// llvm_load_scalar_argument - Load a RealSize-byte scalar argument from the
/// memory at L, widening or narrowing it to the integer type LLVMTy.  A zero
/// RealSize yields an undef value.  Default implementation; targets may
/// override via LLVM_LOAD_SCALAR_ARGUMENT.
static Value *llvm_load_scalar_argument(Value *L,
                                        const llvm::Type *LLVMTy,
                                        unsigned RealSize,
                                        LLVMBuilder &Builder) {
  if (!RealSize)
    return UndefValue::get(LLVMTy);

  // This implementation reads exactly RealSize bytes then zero-extends;
  // that byte-order assumption only holds on little-endian targets.
  assert(!BYTES_BIG_ENDIAN && "Unsupported case - please report");
  assert(LLVMTy->isIntegerTy() && "Expected an integer value!");
  const Type *LoadType = IntegerType::get(Context, RealSize * 8);
  L = Builder.CreateBitCast(L, LoadType->getPointerTo());
  Value *Val = Builder.CreateLoad(L);
  // Note: when the sizes are equal the IRBuilder folds the trunc to a no-op,
  // so the >= comparison is intentional.
  if (LoadType->getPrimitiveSizeInBits() >= LLVMTy->getPrimitiveSizeInBits())
    Val = Builder.CreateTrunc(Val, LLVMTy);
  else
    Val = Builder.CreateZExt(Val, LLVMTy);
  return Val;
}
#ifndef LLVM_LOAD_SCALAR_ARGUMENT
#define LLVM_LOAD_SCALAR_ARGUMENT(LOC,TY,SIZE,BUILDER) \
llvm_load_scalar_argument((LOC),(TY),(SIZE),(BUILDER))
#endif
namespace {
struct FunctionCallArgumentConversion : public DefaultABIClient {
SmallVector<Value*, 16> &CallOperands;
SmallVector<Value*, 2> LocStack;
const FunctionType *FTy;
const MemRef *DestLoc;
bool useReturnSlot;
LLVMBuilder &Builder;
Value *TheValue;
MemRef RetBuf;
CallingConv::ID &CallingConv;
bool isShadowRet;
bool isAggrRet;
unsigned Offset;
// Construct the ABI client for one call: 'ops' receives the final call
// operands, FnTy is the ABI-converted function type, destloc (may be null)
// is where an aggregate result should go, and ReturnSlotOpt permits writing
// the result directly into destloc.
FunctionCallArgumentConversion(SmallVector<Value*, 16> &ops,
                               const FunctionType *FnTy,
                               const MemRef *destloc,
                               bool ReturnSlotOpt,
                               LLVMBuilder &b,
                               CallingConv::ID &CC)
  : CallOperands(ops), FTy(FnTy), DestLoc(destloc),
    useReturnSlot(ReturnSlotOpt), Builder(b), CallingConv(CC),
    isShadowRet(false), isAggrRet(false), Offset(0) { }
CallingConv::ID& getCallingConv(void) { return CallingConv; }
// Push an argument that is already in memory; Loc is its address.
void pushAddress(Value *Loc) {
  assert(Loc && "Invalid location!");
  LocStack.push_back(Loc);
}
// Push an argument held as an SSA value; a null marker goes on the stack
// and the value itself is kept in TheValue (only legal at top level).
void pushValue(Value *V) {
  assert(LocStack.empty() && "Value only allowed at top level!");
  LocStack.push_back(NULL);
  TheValue = V;
}
// Get the address of the current argument, spilling TheValue to a fresh
// temporary if the argument was pushed as a value.
Value *getAddress(void) {
  assert(!LocStack.empty());
  Value *&Loc = LocStack.back();
  if (!Loc) {
    // A value.  Store to a temporary so its address can be taken.
    Loc = TheTreeToLLVM->CreateTemporary(TheValue->getType());
    Builder.CreateStore(TheValue, Loc);
  }
  return Loc;
}
// Get the current argument as a value of type Ty: either reload it from
// its memory location (reinterpreted as Ty*), or return TheValue directly.
Value *getValue(const Type *Ty) {
  assert(!LocStack.empty());
  Value *Loc = LocStack.back();
  if (Loc) {
    // An address.  Reinterpret and load Ty from it.
    Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo());
    return Builder.CreateLoad(Loc, "val");
  } else {
    assert(TheValue->getType() == Ty && "Value not of expected type!");
    return TheValue;
  }
}
// Finish the current argument; exactly one entry must remain.
void clear() {
  assert(LocStack.size() == 1 && "Imbalance!");
  LocStack.clear();
}
bool isShadowReturn() const { return isShadowRet; }
bool isAggrReturn() { return isAggrRet; }
// After the call, recover a shadow-returned result from our private buffer
// (if one was used): copy it to DestLoc for aggregates, or load and return
// it for scalars.  Returns 0 when no buffer was allocated.
Value *EmitShadowResult(tree type, const MemRef *DestLoc) {
  if (!RetBuf.Ptr)
    return 0;

  if (DestLoc) {
    // Aggregate result: copy the buffer to the caller's destination.
    assert(ConvertType(type) ==
           cast<PointerType>(RetBuf.Ptr->getType())->getElementType() &&
           "Inconsistent result types!");
    TheTreeToLLVM->EmitAggregateCopy(*DestLoc, RetBuf, type);
    return 0;
  } else {
    // Scalar result: load it out of the buffer.
    return Builder.CreateLoad(RetBuf.Ptr, "result");
  }
}
void HandleScalarResult(const Type *RetTy) {
assert(DestLoc == 0 &&
"Call returns a scalar but caller expects aggregate!");
}
void HandleAggregateResultAsScalar(const Type *ScalarTy,
unsigned Offset = 0) {
this->Offset = Offset;
}
void HandleAggregateResultAsAggregate(const Type *AggrTy) {
isAggrRet = true;
}
void HandleAggregateShadowResult(const PointerType *PtrArgTy,
bool RetPtr) {
assert(!DestLoc || PtrArgTy == DestLoc->Ptr->getType());
if (DestLoc == 0) {
Value *Buf = TheTreeToLLVM->CreateTemporary(PtrArgTy->getElementType());
CallOperands.push_back(Buf);
} else if (useReturnSlot) {
CallOperands.push_back(DestLoc->Ptr);
} else {
RetBuf = TheTreeToLLVM->CreateTempLoc(PtrArgTy->getElementType());
CallOperands.push_back(RetBuf.Ptr);
}
isShadowRet = true;
}
void HandlePad(const llvm::Type *LLVMTy) {
CallOperands.push_back(UndefValue::get(LLVMTy));
}
void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
assert(DestLoc == 0 &&
"Call returns a scalar but caller expects aggregate!");
RetBuf = TheTreeToLLVM->CreateTempLoc(PtrArgTy->getElementType());
CallOperands.push_back(RetBuf.Ptr);
isShadowRet = true;
}
void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
unsigned RealSize = 0) {
Value *Loc = NULL;
if (RealSize) {
Value *L = getAddress();
Loc = LLVM_LOAD_SCALAR_ARGUMENT(L,LLVMTy,RealSize,Builder);
} else
Loc = getValue(LLVMTy);
if (CallOperands.size() < FTy->getNumParams()) {
const Type *CalledTy= FTy->getParamType(CallOperands.size());
if (Loc->getType() != CalledTy) {
assert(type && "Inconsistent parameter types?");
bool isSigned = !TYPE_UNSIGNED(type);
Loc = TheTreeToLLVM->CastToAnyType(Loc, isSigned, CalledTy, false);
}
}
CallOperands.push_back(Loc);
}
void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type){
Value *Loc = getAddress();
Loc = Builder.CreateBitCast(Loc, PtrTy);
CallOperands.push_back(Loc);
}
void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
Value *Loc = getAddress();
assert(LLVMTy->getPointerTo() == Loc->getType());
CallOperands.push_back(Loc);
}
void HandleFCAArgument(const llvm::Type *LLVMTy,
tree type ATTRIBUTE_UNUSED) {
Value *Loc = getAddress();
assert(LLVMTy->getPointerTo() == Loc->getType());
CallOperands.push_back(Builder.CreateLoad(Loc));
}
void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {
Value *Loc = getAddress();
Loc = Builder.CreateBitCast(Loc, StructTy->getPointerTo());
pushAddress(Builder.CreateStructGEP(Loc, FieldNo, "elt"));
}
void ExitField() {
assert(!LocStack.empty());
LocStack.pop_back();
}
};
}
/// EmitCallOf - Emit a CALL_EXPR to the given callee, converting arguments
/// and the return value per the target ABI.  Emits an invoke instead of a
/// call when the call site can throw into a local landing pad.  Returns the
/// scalar result, or null when the result is void or written to *DestLoc.
Value *TreeToLLVM::EmitCallOf(Value *Callee, tree exp, const MemRef *DestLoc,
const AttrListPtr &InPAL) {
BasicBlock *LandingPad = 0;
AttrListPtr PAL = InPAL;
if (PAL.isEmpty() && isa<Function>(Callee))
PAL = cast<Function>(Callee)->getAttributes();
// If the tree guarantees this call cannot throw, mark it nounwind.
if (!tree_could_throw_p(exp))
PAL = PAL.addAttr(~0, Attribute::NoUnwind);
if (!PAL.paramHasAttr(~0, Attribute::NoUnwind)) {
// The call may throw: find (or create) the landing pad for its EH
// region, if an exception can be caught locally.
int RegionNo = lookup_stmt_eh_region(exp);
if (RegionNo > 0) {
if (can_throw_internal_1(RegionNo, false)) {
LandingPads.grow(RegionNo);
BasicBlock *&ThisPad = LandingPads[RegionNo];
if (!ThisPad)
ThisPad = BasicBlock::Create(Context, "lpad");
LandingPad = ThisPad;
} else {
assert(can_throw_external_1(RegionNo, false) &&
"Must-not-throw region handled by runtime?");
}
}
}
tree fndecl = get_callee_fndecl(exp);
tree fntype = fndecl ?
TREE_TYPE(fndecl) : TREE_TYPE (TREE_TYPE(TREE_OPERAND (exp, 0)));
// Let the target override the calling convention for this function type.
CallingConv::ID CallingConvention = CallingConv::C;
#ifdef TARGET_ADJUST_LLVM_CC
TARGET_ADJUST_LLVM_CC(CallingConvention, fntype);
#endif
SmallVector<Value*, 16> CallOperands;
const PointerType *PFTy = cast<PointerType>(Callee->getType());
const FunctionType *FTy = cast<FunctionType>(PFTy->getElementType());
FunctionCallArgumentConversion Client(CallOperands, FTy, DestLoc,
CALL_EXPR_RETURN_SLOT_OPT(exp),
Builder, CallingConvention);
DefaultABI ABIConverter(Client);
// Decide how the result comes back (may push a shadow-return operand).
ABIConverter.HandleReturnType(TREE_TYPE(exp),
fndecl ? fndecl : exp,
fndecl ? DECL_BUILT_IN(fndecl) : false);
// Static chain (nested function) argument, if present.
if (TREE_OPERAND(exp, 2))
CallOperands.push_back(Emit(TREE_OPERAND(exp, 2), 0));
// Convert each actual argument, letting the ABI split/merge as needed.
std::vector<const Type*> ScalarArgs;
for (tree arg = TREE_OPERAND(exp, 1); arg; arg = TREE_CHAIN(arg)) {
tree type = TREE_TYPE(TREE_VALUE(arg));
const Type *ArgTy = ConvertType(type);
if (ArgTy->isSingleValueType()) {
Client.pushValue(Emit(TREE_VALUE(arg), 0));
} else if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(type, ArgTy)) {
// Aggregate passed as a first-class value: load it whole.
LValue ArgVal = EmitLV(TREE_VALUE(arg));
Client.pushValue(Builder.CreateLoad(ArgVal.Ptr));
} else {
LValue ArgVal = EmitLV(TREE_VALUE(arg));
assert(!ArgVal.isBitfield() && "Bitfields are first-class types!");
Client.pushAddress(ArgVal.Ptr);
}
Attributes Attrs = Attribute::None;
unsigned OldSize = CallOperands.size();
ABIConverter.HandleArgument(type, ScalarArgs, &Attrs);
if (Attrs != Attribute::None) {
// Apply the attribute to every operand this argument expanded into.
// Attribute list indices are 1-based.
for (unsigned i = OldSize + 1; i <= CallOperands.size(); ++i) {
PAL = PAL.addAttr(i, Attrs);
}
}
Client.clear();
}
// Strip a pointless bitcast of the callee when calling it with no
// arguments through a compatible varargs-free prototype.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Callee)) {
if (CallOperands.empty() && CE->getOpcode() == Instruction::BitCast) {
Constant *RealCallee = CE->getOperand(0);
assert(RealCallee->getType()->isPointerTy() &&
"Bitcast to ptr not from ptr?");
const PointerType *RealPT = cast<PointerType>(RealCallee->getType());
if (const FunctionType *RealFT =
dyn_cast<FunctionType>(RealPT->getElementType())) {
const PointerType *ActualPT = cast<PointerType>(Callee->getType());
const FunctionType *ActualFT =
cast<FunctionType>(ActualPT->getElementType());
if (RealFT->getReturnType() == ActualFT->getReturnType() &&
RealFT->getNumParams() == 0)
Callee = RealCallee;
}
}
}
// Emit a plain call, or an invoke if a landing pad was selected above.
Value *Call;
if (!LandingPad) {
Call = Builder.CreateCall(Callee, CallOperands.begin(), CallOperands.end());
cast<CallInst>(Call)->setCallingConv(CallingConvention);
cast<CallInst>(Call)->setAttributes(PAL);
// objc_exception_throw can in fact return under the v1 ObjC ABI.
if (flag_objc_abi == 1 &&
Callee->getName() == "objc_exception_throw")
cast<CallInst>(Call)->removeAttribute(~0U, Attribute::NoReturn);
} else {
BasicBlock *NextBlock = BasicBlock::Create(Context, "invcont");
Call = Builder.CreateInvoke(Callee, NextBlock, LandingPad,
CallOperands.begin(), CallOperands.end());
cast<InvokeInst>(Call)->setCallingConv(CallingConvention);
cast<InvokeInst>(Call)->setAttributes(PAL);
EmitBlock(NextBlock);
}
// Retrieve the result from the shadow-return buffer, if one was used.
if (Client.isShadowReturn())
return Client.EmitShadowResult(TREE_TYPE(exp), DestLoc);
if (Call->getType()->isVoidTy())
return 0;
if (Client.isAggrReturn()) {
// First-class aggregate result: scatter it into the destination,
// going through a larger temporary if the destination is too small.
if (TD.getTypeAllocSize(Call->getType()) <=
TD.getTypeAllocSize(cast<PointerType>(DestLoc->Ptr->getType())
->getElementType())) {
Value *Dest = BitCastToType(DestLoc->Ptr, Call->getType()->getPointerTo());
LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call,Dest,DestLoc->Volatile,Builder);
} else {
AllocaInst *biggerTmp = CreateTemporary(Call->getType());
LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Call,biggerTmp,false,
Builder);
EmitAggregateCopy(*DestLoc,
MemRef(BitCastToType(biggerTmp,Call->getType()->
getPointerTo()),
DestLoc->getAlignment(),
DestLoc->Volatile),
TREE_TYPE(exp));
}
return 0;
}
if (!DestLoc)
return Call;
// Scalar result but the caller wants it in memory: store it, honouring
// the byte offset recorded for scalarized aggregate returns.
Value *Ptr = DestLoc->Ptr;
if (Client.Offset) {
Ptr = BitCastToType(Ptr, Type::getInt8PtrTy(Context));
Ptr = Builder.CreateGEP(Ptr,
ConstantInt::get(TD.getIntPtrType(Context), Client.Offset));
}
Ptr = BitCastToType(Ptr, Call->getType()->getPointerTo());
StoreInst *St = Builder.CreateStore(Call, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
}
/// HandleMultiplyDefinedGimpleTemporary - A gimple temporary that was
/// assumed to have a single definition turned out to be assigned more than
/// once.  Demote it to a real stack slot: store the first value into a new
/// alloca just after it is computed, and replace every existing use with a
/// load from that slot.
void TreeToLLVM::HandleMultiplyDefinedGimpleTemporary(tree Var) {
Value *UniqVal = DECL_LLVM(Var);
// Single-def temporaries are represented by the (possibly no-op) cast
// emitted for the first assignment; its operand is the first value.
assert(isa<CastInst>(UniqVal) && "Invalid value for gimple temporary!");
Value *FirstVal = cast<CastInst>(UniqVal)->getOperand(0);
Value *NewTmp = CreateTemporary(FirstVal->getType());
SET_DECL_LLVM(Var, NewTmp);
StoreInst *SI = new StoreInst(FirstVal, NewTmp);
// Find a legal insertion point for the store, immediately after FirstVal
// is available.
BasicBlock::iterator InsertPt;
if (Instruction *I = dyn_cast<Instruction>(FirstVal)) {
InsertPt = I;
bool InsertPtFinal = false;
if (I->getParent() == &Fn->front()) {
// If FirstVal precedes the alloca insertion point in the entry block,
// the store must go after that point so the alloca dominates it.
for (BasicBlock::iterator CI = InsertPt, E = Fn->begin()->end();
CI != E; ++CI) {
if (&*CI == AllocaInsertionPoint) {
InsertPt = AllocaInsertionPoint;
++InsertPt;
InsertPtFinal = true; break;
}
}
}
if (!InsertPtFinal) {
// Can't insert after an invoke in its own block; use the start of its
// normal destination, past any phi nodes.
if (InvokeInst *II = dyn_cast<InvokeInst>(InsertPt)) {
InsertPt = II->getNormalDest()->begin();
while (isa<PHINode>(InsertPt))
++InsertPt;
}
else
++InsertPt; }
} else {
// FirstVal is a constant/argument: store right after the allocas.
InsertPt = AllocaInsertionPoint; ++InsertPt;
}
BasicBlock *BB = InsertPt->getParent();
BB->getInstList().insert(InsertPt, SI);
// Rewrite all existing uses of the old SSA value to load from the slot.
for (Value::use_iterator U = UniqVal->use_begin(), E = UniqVal->use_end();
U != E; ++U)
U.getUse().set(new LoadInst(NewTmp, "mtmp", cast<Instruction>(*U)));
// No longer a formal temporary: future assignments go through the slot.
DECL_GIMPLE_FORMAL_TEMP_P(Var) = 0;
}
/// EmitMODIFY_EXPR - Emit an assignment "lhs = rhs".  Handles empty types,
/// gimple temporaries, global register variables, plain scalar/aggregate
/// stores, and (multi-word) bitfield stores.  Returns the stored scalar
/// value where one exists, otherwise null.
Value *TreeToLLVM::EmitMODIFY_EXPR(tree exp, const MemRef *DestLoc) {
tree lhs = TREE_OPERAND (exp, 0);
tree rhs = TREE_OPERAND (exp, 1);
// Assignments of empty (zero-size) types only matter for side effects.
if (lang_hooks.empty_type_p(TREE_TYPE(exp))) {
if (TREE_SIDE_EFFECTS(rhs)) {
const Type *RHSTy = ConvertType(TREE_TYPE(rhs));
MemRef dest = CreateTempLoc(RHSTy);
return Emit(rhs, &dest);
} else
return (Value *)0;
}
bool LHSSigned = !TYPE_UNSIGNED(TREE_TYPE(lhs));
bool RHSSigned = !TYPE_UNSIGNED(TREE_TYPE(rhs));
if (isGimpleTemporary(lhs)) {
// A temporary assigned twice must first be demoted to a stack slot.
if (DECL_LLVM_SET_P(lhs)) {
HandleMultiplyDefinedGimpleTemporary(lhs);
return EmitMODIFY_EXPR(exp, DestLoc);
}
// Represent the single definition as a cast to the lhs type; a no-op
// bitcast is remembered so it can be cleaned up later.
Value *RHS = Emit(rhs, 0);
const Type *LHSTy = ConvertType(TREE_TYPE(lhs));
Instruction::CastOps opc = CastInst::getCastOpcode(RHS, RHSSigned,
LHSTy, LHSSigned);
CastInst *Cast = CastInst::Create(opc, RHS, LHSTy, RHS->getName());
if (opc == Instruction::BitCast && RHS->getType() == LHSTy)
UniquedValues.push_back(cast<BitCastInst>(Cast));
Builder.Insert(Cast);
SET_DECL_LLVM(lhs, Cast);
return Cast;
} else if (canEmitGlobalRegisterVariable(lhs)) {
// Store to a global register variable via inline asm.
Value *RHS = Emit(rhs, 0);
RHS = CastToAnyType(RHS, RHSSigned, ConvertType(TREE_TYPE(lhs)), LHSSigned);
EmitModifyOfRegisterVariable(lhs, RHS);
return RHS;
}
LValue LV = EmitLV(lhs);
bool isVolatile = TREE_THIS_VOLATILE(lhs);
unsigned Alignment = LV.getAlignment();
// Fields of packed structs may be less aligned than their type suggests.
if (TREE_CODE(lhs) == COMPONENT_REF)
if (const StructType *STy =
dyn_cast<StructType>(ConvertType(TREE_TYPE(TREE_OPERAND(lhs, 0)))))
if (STy->isPacked())
Alignment = 1;
if (!LV.isBitfield()) {
const Type *ValTy = ConvertType(TREE_TYPE(rhs));
if (ValTy->isSingleValueType()) {
// Scalar store.  If the pointee type disagrees with the value type,
// either convert the value or retype the pointer, whichever loses
// no information.
Value *RHS = Emit(rhs, 0);
const PointerType *PT = cast<PointerType>(LV.Ptr->getType());
if (PT->getElementType()->canLosslesslyBitCastTo(RHS->getType()) ||
(PT->getElementType()->getPrimitiveSizeInBits() >
RHS->getType()->getPrimitiveSizeInBits()))
RHS = CastToAnyType(RHS, RHSSigned, PT->getElementType(), LHSSigned);
else
LV.Ptr = BitCastToType(LV.Ptr, RHS->getType()->getPointerTo());
StoreInst *SI = Builder.CreateStore(RHS, LV.Ptr, isVolatile);
SI->setAlignment(Alignment);
return RHS;
}
// Aggregate store: emit the rhs directly into the lhs location, and
// copy on to DestLoc if the assignment's value is also used.
MemRef NewLoc(LV.Ptr, Alignment, isVolatile);
Emit(rhs, &NewLoc);
if (DestLoc)
EmitAggregateCopy(*DestLoc, NewLoc, TREE_TYPE(exp));
return 0;
}
// Bitfield store.
Value *RHS = Emit(rhs, 0);
if (!LV.BitSize)
return RHS;
const Type *ValTy = cast<PointerType>(LV.Ptr->getType())->getElementType();
unsigned ValSizeInBits = ValTy->getPrimitiveSizeInBits();
// The field may straddle several memory words; compute how many.
unsigned Strides = 1 + (LV.BitStart + LV.BitSize - 1) / ValSizeInBits;
assert(ValTy->isIntegerTy() && "Invalid bitfield lvalue!");
assert(ValSizeInBits > LV.BitStart && "Bad bitfield lvalue!");
assert(ValSizeInBits >= LV.BitSize && "Bad bitfield lvalue!");
assert(2*ValSizeInBits > LV.BitSize+LV.BitStart && "Bad bitfield lvalue!");
Value *BitSource = CastToAnyType(RHS, RHSSigned, ValTy, LHSSigned);
// Read-modify-write each affected word, low bits first (on big-endian
// targets the words are visited in reverse memory order).
for (unsigned I = 0; I < Strides; I++) {
unsigned Index = BYTES_BIG_ENDIAN ? Strides - I - 1 : I; unsigned ThisFirstBit = Index * ValSizeInBits;
unsigned ThisLastBitPlusOne = ThisFirstBit + ValSizeInBits;
if (ThisFirstBit < LV.BitStart)
ThisFirstBit = LV.BitStart;
if (ThisLastBitPlusOne > LV.BitStart+LV.BitSize)
ThisLastBitPlusOne = LV.BitStart+LV.BitSize;
Value *Ptr = Index ?
Builder.CreateGEP(LV.Ptr, ConstantInt::get(Type::getInt32Ty(Context), Index)) :
LV.Ptr;
LoadInst *LI = Builder.CreateLoad(Ptr, isVolatile);
LI->setAlignment(Alignment);
Value *OldVal = LI;
Value *NewVal = BitSource;
unsigned BitsInVal = ThisLastBitPlusOne - ThisFirstBit;
unsigned FirstBitInVal = ThisFirstBit % ValSizeInBits;
if (BYTES_BIG_ENDIAN)
FirstBitInVal = ValSizeInBits-FirstBitInVal-BitsInVal;
// Shift the new bits into position within this word.
if (FirstBitInVal) {
Value *ShAmt = ConstantInt::get(ValTy, FirstBitInVal);
NewVal = Builder.CreateShl(NewVal, ShAmt);
}
// Mask off bits outside the field, merge with the old contents.
uint64_t MaskVal = ((1ULL << BitsInVal)-1) << FirstBitInVal;
Constant *Mask = ConstantInt::get(Type::getInt64Ty(Context), MaskVal);
Mask = Builder.getFolder().CreateTruncOrBitCast(Mask, ValTy);
if (FirstBitInVal+BitsInVal != ValSizeInBits)
NewVal = Builder.CreateAnd(NewVal, Mask);
Mask = Builder.getFolder().CreateNot(Mask);
OldVal = Builder.CreateAnd(OldVal, Mask);
NewVal = Builder.CreateOr(OldVal, NewVal);
StoreInst *SI = Builder.CreateStore(NewVal, Ptr, isVolatile);
SI->setAlignment(Alignment);
// Advance the source bits for the next word, if any.
if (I + 1 < Strides) {
Value *ShAmt = ConstantInt::get(ValTy, BitsInVal);
BitSource = Builder.CreateLShr(BitSource, ShAmt);
}
}
return RHS;
}
/// EmitNOP_EXPR - Emit a NOP_EXPR: a value-preserving conversion between
/// types.  Scalar results are returned; aggregate results are written to
/// *DestLoc (after retyping the destination pointer) and null is returned.
Value *TreeToLLVM::EmitNOP_EXPR(tree exp, const MemRef *DestLoc) {
// A cast of an integer constant to void is a pure no-op.
if (TREE_CODE(TREE_TYPE(exp)) == VOID_TYPE && TREE_CODE(TREE_OPERAND(exp, 0)) == INTEGER_CST)
return 0;
tree Op = TREE_OPERAND(exp, 0);
const Type *Ty = ConvertType(TREE_TYPE(exp));
bool OpIsSigned = !TYPE_UNSIGNED(TREE_TYPE(Op));
bool ExpIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
if (DestLoc == 0) {
// Scalar-to-scalar conversion.
assert(!isAggregateTreeType(TREE_TYPE(Op))
&& "Aggregate to scalar nop_expr!");
Value *OpVal = Emit(Op, DestLoc);
if (Ty->isVoidTy()) return 0;
return CastToAnyType(OpVal, OpIsSigned, Ty, ExpIsSigned);
} else if (isAggregateTreeType(TREE_TYPE(Op))) {
// Aggregate-to-aggregate: emit the operand straight into the
// destination, viewed as the result type.
MemRef NewLoc = *DestLoc;
NewLoc.Ptr = BitCastToType(DestLoc->Ptr, Ty->getPointerTo());
Value *OpVal = Emit(Op, &NewLoc);
assert(OpVal == 0 && "Shouldn't cast scalar to aggregate!");
return 0;
}
// Scalar operand but an aggregate destination: store through a pointer
// retyped to the scalar's type.
Value *OpVal = Emit(Op, 0);
Value *Ptr = BitCastToType(DestLoc->Ptr, OpVal->getType()->getPointerTo());
StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
}
/// EmitCONVERT_EXPR - Emit a scalar type conversion; aggregates never
/// reach this code.
Value *TreeToLLVM::EmitCONVERT_EXPR(tree exp, const MemRef *DestLoc) {
  assert(!DestLoc && "Cannot handle aggregate casts!");
  tree OpTree = TREE_OPERAND(exp, 0);
  Value *OpVal = Emit(OpTree, 0);
  bool FromSigned = !TYPE_UNSIGNED(TREE_TYPE(OpTree));
  bool ToSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
  // CastToAnyType chooses the appropriate cast opcode for the type pair.
  return CastToAnyType(OpVal, FromSigned, ConvertType(TREE_TYPE(exp)),
                       ToSigned);
}
/// EmitVIEW_CONVERT_EXPR - Emit a reinterpretation of a value as another
/// type of the same size, without changing any bits.  Aggregates are
/// copied through memory; scalars are converted with bit-preserving casts.
Value *TreeToLLVM::EmitVIEW_CONVERT_EXPR(tree exp, const MemRef *DestLoc) {
tree Op = TREE_OPERAND(exp, 0);
if (isAggregateTreeType(TREE_TYPE(Op))) {
// Aggregate operand: emit it into the destination (or a temporary),
// viewed through a pointer to the operand's own type.
MemRef Target;
if (DestLoc)
Target = *DestLoc;
else
Target = CreateTempLoc(ConvertType(TREE_TYPE(exp)));
const Type *OpTy = ConvertType(TREE_TYPE(Op));
Target.Ptr = BitCastToType(Target.Ptr, OpTy->getPointerTo());
switch (TREE_CODE(Op)) {
default: {
Value *OpVal = Emit(Op, &Target);
assert(OpVal == 0 && "Expected an aggregate operand!");
break;
}
// For lvalue-like operands, copy directly from their storage rather
// than re-emitting the value.
case VAR_DECL:
case PARM_DECL:
case RESULT_DECL:
case INDIRECT_REF:
case ARRAY_REF:
case ARRAY_RANGE_REF:
case COMPONENT_REF:
case BIT_FIELD_REF:
case STRING_CST:
case REALPART_EXPR:
case IMAGPART_EXPR:
LValue LV = EmitLV(Op);
assert(!LV.isBitfield() && "Expected an aggregate operand!");
bool isVolatile = TREE_THIS_VOLATILE(Op);
unsigned Alignment = LV.getAlignment();
EmitAggregateCopy(Target, MemRef(LV.Ptr, Alignment, isVolatile),
TREE_TYPE(exp));
break;
}
if (DestLoc)
return 0;
// Scalar result requested: load the bits back out as the result type.
const Type *ExpTy = ConvertType(TREE_TYPE(exp));
return Builder.CreateLoad(BitCastToType(Target.Ptr,
ExpTy->getPointerTo()));
}
if (DestLoc) {
// Scalar operand, aggregate destination: store the scalar's bits.
Value *OpVal = Emit(Op, 0);
assert(OpVal && "Expected a scalar result!");
Value *Ptr = BitCastToType(DestLoc->Ptr, OpVal->getType()->getPointerTo());
StoreInst *St = Builder.CreateStore(OpVal, Ptr, DestLoc->Volatile);
St->setAlignment(DestLoc->getAlignment());
return 0;
}
// Scalar to scalar: use bit-preserving casts, going through the integer
// type of pointer width if exactly one side is a pointer.
Value *OpVal = Emit(Op, 0);
assert(OpVal && "Expected a scalar result!");
const Type *DestTy = ConvertType(TREE_TYPE(exp));
if (OpVal->getType()->isPointerTy()) {
if (DestTy->isPointerTy()) return Builder.CreateBitCast(OpVal, DestTy);
OpVal = Builder.CreatePtrToInt(OpVal, TD.getIntPtrType(Context));
}
if (DestTy->isPointerTy())
return Builder.CreateIntToPtr(OpVal, DestTy);
return Builder.CreateBitCast(OpVal, DestTy);
}
/// EmitNEGATE_EXPR - Emit arithmetic negation for scalars, pointers and
/// complex numbers.
Value *TreeToLLVM::EmitNEGATE_EXPR(tree exp, const MemRef *DestLoc) {
  // Scalar negation: no aggregate destination was supplied.
  if (!DestLoc) {
    Value *Op = Emit(TREE_OPERAND(exp, 0), 0);
    // Floating point (or FP vector) negation.
    if (Op->getType()->isFPOrFPVectorTy())
      return Builder.CreateFNeg(Op);
    if (!Op->getType()->isPointerTy()) {
      // Integer negation: mark no-signed-wrap when overflow is undefined.
      if (TYPE_OVERFLOW_WRAPS(TREE_TYPE(exp)))
        return Builder.CreateNeg(Op);
      return Builder.CreateNSWNeg(Op);
    }
    // Pointer negation: do it in the integer domain and convert back.
    Value *AsInt = CastToAnyType(Op, false, TD.getIntPtrType(Context), false);
    AsInt = Builder.CreateNeg(AsInt);
    return CastToType(Instruction::IntToPtr, AsInt,
                      ConvertType(TREE_TYPE(exp)));
  }
  // Complex negation: negate both the real and the imaginary part.
  const Type *ComplexTy =
    cast<PointerType>(DestLoc->Ptr->getType())->getElementType();
  MemRef Tmp = CreateTempLoc(ComplexTy);
  Emit(TREE_OPERAND(exp, 0), &Tmp);
  Value *Re, *Im;
  EmitLoadFromComplex(Re, Im, Tmp);
  if (Re->getType()->isFloatingPointTy()) {
    Re = Builder.CreateFNeg(Re);
    Im = Builder.CreateFNeg(Im);
  } else {
    Re = Builder.CreateNeg(Re);
    Im = Builder.CreateNeg(Im);
  }
  EmitStoreToComplex(*DestLoc, Re, Im);
  return 0;
}
/// EmitCONJ_EXPR - Emit a complex conjugate: keep the real part, negate
/// the imaginary part.
Value *TreeToLLVM::EmitCONJ_EXPR(tree exp, const MemRef *DestLoc) {
  assert(DestLoc && "CONJ_EXPR only applies to complex numbers.");
  const Type *ComplexTy =
    cast<PointerType>(DestLoc->Ptr->getType())->getElementType();
  MemRef Tmp = CreateTempLoc(ComplexTy);
  Emit(TREE_OPERAND(exp, 0), &Tmp);
  Value *Re, *Im;
  EmitLoadFromComplex(Re, Im, Tmp);
  // Negate the imaginary component only.
  if (Im->getType()->isFloatingPointTy())
    Im = Builder.CreateFNeg(Im);
  else
    Im = Builder.CreateNeg(Im);
  EmitStoreToComplex(*DestLoc, Re, Im);
  return 0;
}
/// EmitABS_EXPR - Emit an absolute value.  Integers use a compare+select;
/// floating point calls the matching libm fabs variant.
Value *TreeToLLVM::EmitABS_EXPR(tree exp) {
  Value *Op = Emit(TREE_OPERAND(exp, 0), 0);
  const Type *OpTy = Op->getType();
  if (!OpTy->isFloatingPointTy()) {
    // abs(x) = (x >= 0) ? x : -x; an unsigned compare is used when the
    // operand's tree type is unsigned.
    Value *Negated = Builder.CreateNeg(Op, (Op->getNameStr()+"neg").c_str());
    ICmpInst::Predicate Pred =
      TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0))) ?
        ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
    Value *IsNonNeg = Builder.CreateICmp(Pred, Op,
                                         Constant::getNullValue(OpTy),
                                         "abscond");
    return Builder.CreateSelect(IsNonNeg, Op, Negated, "abs");
  }
  // Pick the libm function matching the operand's precision.
  const char *FnName = 0;
  switch (OpTy->getTypeID()) {
  default: assert(0 && "Unknown FP type!");
  case Type::FloatTyID:    FnName = "fabsf"; break;
  case Type::DoubleTyID:   FnName = "fabs";  break;
  case Type::X86_FP80TyID:
  case Type::PPC_FP128TyID:
  case Type::FP128TyID:    FnName = "fabsl"; break;
  }
  Value *Fn = TheModule->getOrInsertFunction(FnName, OpTy, OpTy, NULL);
  CallInst *Call = Builder.CreateCall(Fn, Op);
  Call->setDoesNotThrow();
  Call->setDoesNotAccessMemory();
  return Call;
}
/// getSuitableBitCastIntType - Return an integer type with the same bit
/// layout as Ty, suitable for bit-level operations.  Vectors map to
/// vectors of same-width integer elements.
static const Type *getSuitableBitCastIntType(const Type *Ty) {
  if (const VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
    const Type *IntEltTy =
      IntegerType::get(Context,
                       VecTy->getElementType()->getPrimitiveSizeInBits());
    return VectorType::get(IntEltTy, VecTy->getNumElements());
  }
  return IntegerType::get(Context, Ty->getPrimitiveSizeInBits());
}
/// EmitBIT_NOT_EXPR - Emit a bitwise complement.  Pointer operands are
/// complemented in the integer domain; floating-point operands are
/// bitcast to integers, inverted, and bitcast back.
Value *TreeToLLVM::EmitBIT_NOT_EXPR(tree exp) {
  Value *Op = Emit(TREE_OPERAND(exp, 0), 0);
  const Type *Ty = Op->getType();
  if (Ty->isPointerTy()) {
    // A pointer operand only occurs when the result is integral.
    assert (TREE_CODE(TREE_TYPE(exp)) == INTEGER_TYPE &&
            "Expected integer type here");
    Ty = ConvertType(TREE_TYPE(exp));
    Op = CastToType(Instruction::PtrToInt, Op, Ty);
  } else {
    // LLVM has no FP 'not'; go through an equally-sized integer type.
    bool IsFPVector = Ty->isVectorTy() &&
      cast<VectorType>(Ty)->getElementType()->isFloatingPointTy();
    if (Ty->isFloatingPointTy() || IsFPVector)
      Op = BitCastToType(Op, getSuitableBitCastIntType(Ty));
  }
  Value *Inverted = Builder.CreateNot(Op, (Op->getNameStr()+"not").c_str());
  return BitCastToType(Inverted, Ty);
}
/// EmitTRUTH_NOT_EXPR - Emit a logical negation: normalize to i1, invert,
/// then widen to the expression's type.
Value *TreeToLLVM::EmitTRUTH_NOT_EXPR(tree exp) {
  Value *Val = Emit(TREE_OPERAND(exp, 0), 0);
  if (Val->getType() != Type::getInt1Ty(Context))
    Val = Builder.CreateICmpNE(Val, Constant::getNullValue(Val->getType()),
                               "toBool");
  Val = Builder.CreateNot(Val, (Val->getNameStr()+"not").c_str());
  return CastToUIntType(Val, ConvertType(TREE_TYPE(exp)));
}
/// EmitCompare - Emit a comparison, choosing among the unsigned-integer,
/// signed-integer and floating-point predicates based on the operand
/// type, and zero-extending the i1 result to DestTy (or the expression's
/// converted type when DestTy is null).
Value *TreeToLLVM::EmitCompare(tree exp, unsigned UIOpc, unsigned SIOpc,
                               unsigned FPPred, const Type *DestTy) {
  tree Op0Ty = TREE_TYPE(TREE_OPERAND(exp,0));
  Value *Result;
  if (TREE_CODE(Op0Ty) == COMPLEX_TYPE) {
    // Complex comparisons are handled by the complex binop emitter.
    Result = EmitComplexBinOp(exp, 0);
  } else {
    Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
    Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
    bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
    bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1)));
    // Bring the RHS over to the LHS type so the operands agree.
    RHS = CastToAnyType(RHS, RHSIsSigned, LHS->getType(), LHSIsSigned);
    assert(LHS->getType() == RHS->getType() &&
           "Binop type equality failure!");
    if (FLOAT_TYPE_P(Op0Ty)) {
      Result = Builder.CreateFCmp(FCmpInst::Predicate(FPPred), LHS, RHS);
    } else {
      ICmpInst::Predicate Pred =
        ICmpInst::Predicate(TYPE_UNSIGNED(Op0Ty) ? UIOpc : SIOpc);
      Result = Builder.CreateICmp(Pred, LHS, RHS);
    }
  }
  assert(Result->getType() == Type::getInt1Ty(Context) &&
         "Expected i1 result for compare");
  if (DestTy == 0)
    DestTy = ConvertType(TREE_TYPE(exp));
  if (Result->getType() == DestTy)
    return Result;
  return Builder.CreateZExt(Result, DestTy);
}
/// EmitBinOp - Emit a simple binary operator.  Dispatches pointer and
/// complex operands to their own emitters; for scalars/vectors it converts
/// both operands to the result type, performs bitwise ops on FP values via
/// an integer detour, and adds no-signed-wrap flags where overflow is
/// undefined in the source language.
Value *TreeToLLVM::EmitBinOp(tree exp, const MemRef *DestLoc, unsigned Opc) {
const Type *Ty = ConvertType(TREE_TYPE(exp));
if (Ty->isPointerTy())
return EmitPtrBinOp(exp, Opc); if (Ty->isStructTy())
return EmitComplexBinOp(exp, DestLoc);
assert(Ty->isSingleValueType() && DestLoc == 0 &&
"Bad binary operation!");
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1)));
bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
bool IsExactDiv = TREE_CODE(exp) == EXACT_DIV_EXPR;
// Bring both operands to the result type.
LHS = CastToAnyType(LHS, LHSIsSigned, Ty, TyIsSigned);
RHS = CastToAnyType(RHS, RHSIsSigned, Ty, TyIsSigned);
bool isLogicalOp = Opc == Instruction::And || Opc == Instruction::Or ||
Opc == Instruction::Xor;
const Type *ResTy = Ty;
// Bitwise ops on FP (or FP vector) values: operate on the bits through
// an integer type of the same width, then bitcast the result back.
if (isLogicalOp &&
(Ty->isFloatingPointTy() ||
(Ty->isVectorTy() &&
cast<VectorType>(Ty)->getElementType()->isFloatingPointTy()))) {
Ty = getSuitableBitCastIntType(Ty);
LHS = BitCastToType(LHS, Ty);
RHS = BitCastToType(RHS, Ty);
}
// Use exact/nsw variants when the front-end semantics guarantee them.
Value *V;
if (Opc == Instruction::SDiv && IsExactDiv)
V = Builder.CreateExactSDiv(LHS, RHS);
else if (Opc == Instruction::Add && !TYPE_OVERFLOW_WRAPS(TREE_TYPE(exp)))
V = Builder.CreateNSWAdd(LHS, RHS);
else if (Opc == Instruction::Sub && !TYPE_OVERFLOW_WRAPS(TREE_TYPE(exp)))
V = Builder.CreateNSWSub(LHS, RHS);
else if (Opc == Instruction::Mul && !TYPE_OVERFLOW_WRAPS(TREE_TYPE(exp)))
V = Builder.CreateNSWMul(LHS, RHS);
else
V = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
if (ResTy != Ty)
V = BitCastToType(V, ResTy);
return V;
}
/// EmitPtrBinOp - Emit a binary operator with a pointer result.  Pointer
/// +/- constant is turned into a GEP when the offset is an exact multiple
/// of the pointee size; everything else falls back to integer arithmetic
/// on the pointer's bit pattern.
Value *TreeToLLVM::EmitPtrBinOp(tree exp, unsigned Opc) {
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
if ((Opc == Instruction::Add || Opc == Instruction::Sub) &&
TREE_CODE(TREE_OPERAND(exp, 1)) == INTEGER_CST) {
int64_t Offset = getINTEGER_CSTVal(TREE_OPERAND(exp, 1));
// Sign-extend a signed 32-bit offset to 64 bits on 32-bit targets.
if (POINTER_SIZE == 32 && !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1))))
Offset = (Offset << 32) >> 32;
const Type *ElTy = cast<PointerType>(LHS->getType())->getElementType();
if (ElTy->isSized()) {
int64_t EltSize = TD.getTypeAllocSize(ElTy);
int64_t EltOffset = EltSize ? Offset/EltSize : 0;
// Only use a GEP when the byte offset divides evenly into elements.
if (EltOffset*EltSize == Offset) {
if (Opc == Instruction::Sub)
EltOffset = -EltOffset;
Constant *C = ConstantInt::get(Type::getInt64Ty(Context), EltOffset);
// Inbounds is only safe when signed pointer overflow is undefined.
Value *V = flag_wrapv ?
Builder.CreateGEP(LHS, C) :
Builder.CreateInBoundsGEP(LHS, C);
return BitCastToType(V, ConvertType(TREE_TYPE(exp)));
}
}
}
// General case: do the arithmetic on pointer-sized integers.
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
const Type *IntPtrTy = TD.getIntPtrType(Context);
bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1)));
LHS = CastToAnyType(LHS, LHSIsSigned, IntPtrTy, false);
RHS = CastToAnyType(RHS, RHSIsSigned, IntPtrTy, false);
Value *V = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
return CastToType(Instruction::IntToPtr, V, ConvertType(TREE_TYPE(exp)));
}
/// EmitTruthOp - Emit a logical and/or/xor: normalize both operands to
/// i1 booleans, combine them, and widen to the result type.
Value *TreeToLLVM::EmitTruthOp(tree exp, unsigned Opc) {
  Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
  Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
  // Compare each operand against zero to obtain an i1 truth value.
  LHS = Builder.CreateICmpNE(LHS, Constant::getNullValue(LHS->getType()),
                             "toBool");
  RHS = Builder.CreateICmpNE(RHS, Constant::getNullValue(RHS->getType()),
                             "toBool");
  Value *Combined = Builder.CreateBinOp((Instruction::BinaryOps)Opc,
                                        LHS, RHS);
  return CastToType(Instruction::ZExt, Combined,
                    ConvertType(TREE_TYPE(exp)));
}
/// EmitShiftOp - Emit a shift; LLVM requires the amount to have the same
/// type as the shifted value, so an int cast may be inserted.
Value *TreeToLLVM::EmitShiftOp(tree exp, const MemRef *DestLoc, unsigned Opc) {
  assert(DestLoc == 0 && "aggregate shift?");
  const Type *Ty = ConvertType(TREE_TYPE(exp));
  assert(!Ty->isPointerTy() && "Pointer arithmetic!?");
  Value *Val = Emit(TREE_OPERAND(exp, 0), 0);
  Value *Amt = Emit(TREE_OPERAND(exp, 1), 0);
  // Adjust the shift amount's width (shift counts are never negative,
  // so an unsigned cast is correct).
  if (Amt->getType() != Val->getType())
    Amt = Builder.CreateIntCast(Amt, Val->getType(), false,
                                (Amt->getNameStr()+".cast").c_str());
  return Builder.CreateBinOp((Instruction::BinaryOps)Opc, Val, Amt);
}
/// EmitRotateOp - Emit a rotate as two opposed shifts or'd together:
///   rot(In, Amt) = (In op1 Amt) | (In op2 (bitwidth - Amt))
Value *TreeToLLVM::EmitRotateOp(tree exp, unsigned Opc1, unsigned Opc2) {
  Value *In = Emit(TREE_OPERAND(exp, 0), 0);
  Value *Amt = Emit(TREE_OPERAND(exp, 1), 0);
  if (In->getType()->isPointerTy()) {
    // Rotate the pointer's bit pattern as an integer of its precision.
    const Type *IntTy =
      IntegerType::get(Context,
                       TYPE_PRECISION(TREE_TYPE (TREE_OPERAND (exp, 0))));
    In = Builder.CreatePtrToInt(In, IntTy,
                                (In->getNameStr()+".cast").c_str());
  }
  // The amount must match the value's type for the shift instructions.
  if (Amt->getType() != In->getType())
    Amt = Builder.CreateIntCast(Amt, In->getType(), false,
                                (Amt->getNameStr()+".cast").c_str());
  Value *BitWidth =
    ConstantInt::get(In->getType(),
                     In->getType()->getPrimitiveSizeInBits());
  Value *Part1 = Builder.CreateBinOp((Instruction::BinaryOps)Opc1, In, Amt);
  Value *OtherAmt = Builder.CreateSub(BitWidth, Amt);
  Value *Part2 = Builder.CreateBinOp((Instruction::BinaryOps)Opc2, In,
                                     OtherAmt);
  Value *Rotated = Builder.CreateOr(Part1, Part2);
  return CastToUIntType(Rotated, ConvertType(TREE_TYPE(exp)));
}
/// EmitMinMaxExpr - Emit MIN_EXPR/MAX_EXPR as a compare followed by a
/// select, after converting both operands to the result type.
Value *TreeToLLVM::EmitMinMaxExpr(tree exp, unsigned UIPred, unsigned SIPred,
                                  unsigned FPPred) {
  Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
  Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
  const Type *Ty = ConvertType(TREE_TYPE(exp));
  bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
  // Convert each operand to the result type, honouring signedness.
  bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
  bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 1)));
  LHS = CastToType(CastInst::getCastOpcode(LHS, LHSIsSigned, Ty, TyIsSigned),
                   LHS, Ty);
  RHS = CastToType(CastInst::getCastOpcode(RHS, RHSIsSigned, Ty, TyIsSigned),
                   RHS, Ty);
  // Pick the predicate matching the operand type and signedness.
  Value *Cmp;
  if (LHS->getType()->isFloatingPointTy())
    Cmp = Builder.CreateFCmp(FCmpInst::Predicate(FPPred), LHS, RHS);
  else if (TYPE_UNSIGNED(TREE_TYPE(exp)))
    Cmp = Builder.CreateICmp(ICmpInst::Predicate(UIPred), LHS, RHS);
  else
    Cmp = Builder.CreateICmp(ICmpInst::Predicate(SIPred), LHS, RHS);
  return Builder.CreateSelect(Cmp, LHS, RHS,
                              TREE_CODE(exp) == MAX_EXPR ? "max" : "min");
}
/// EmitEXACT_DIV_EXPR - Emit an exact division; the result type's
/// signedness selects between udiv and sdiv.
Value *TreeToLLVM::EmitEXACT_DIV_EXPR(tree exp, const MemRef *DestLoc) {
  unsigned Opc = TYPE_UNSIGNED(TREE_TYPE(exp)) ? Instruction::UDiv
                                               : Instruction::SDiv;
  return EmitBinOp(exp, DestLoc, Opc);
}
/// EmitFLOOR_MOD_EXPR - Emit a remainder rounding towards negative
/// infinity.  LLVM's srem truncates towards zero, so when the operands'
/// signs differ and the remainder is non-zero the result is rem + RHS.
Value *TreeToLLVM::EmitFLOOR_MOD_EXPR(tree exp, const MemRef *DestLoc) {
  // For unsigned operands floor-mod coincides with plain remainder.
  if (TYPE_UNSIGNED(TREE_TYPE(exp)))
    return EmitBinOp(exp, DestLoc, Instruction::URem);
  const Type *Ty = ConvertType(TREE_TYPE(exp));
  Constant *Zero = ConstantInt::get(Ty, 0);
  Value *Dividend = Emit(TREE_OPERAND(exp, 0), 0);
  Value *Divisor = Emit(TREE_OPERAND(exp, 1), 0);
  Value *Rem = Builder.CreateSRem(Dividend, Divisor, "rem");
  Value *Adjusted = Builder.CreateAdd(Rem, Divisor);
  // Decide whether the truncated remainder already equals the floor-mod.
  Value *DividendNonNeg = Builder.CreateICmpSGE(Dividend, Zero);
  Value *DivisorNonNeg = Builder.CreateICmpSGE(Divisor, Zero);
  Value *SignsMatch = Builder.CreateICmpEQ(DividendNonNeg, DivisorNonNeg);
  Value *RemIsZero = Builder.CreateICmpEQ(Rem, Zero);
  Value *KeepRem = Builder.CreateOr(SignsMatch, RemIsZero);
  return Builder.CreateSelect(KeepRem, Rem, Adjusted, "mod");
}
/// EmitCEIL_DIV_EXPR - Emit a division rounding towards positive infinity.
/// LLVM's division truncates towards zero, so the quotient is adjusted
/// branchlessly:   CDiv = (LHS - sign(RHS)*offset) /trunc RHS + offset,
/// where offset is 1 exactly when the truncated result would round down.
Value *TreeToLLVM::EmitCEIL_DIV_EXPR(tree exp) {
const Type *Ty = ConvertType(TREE_TYPE(exp));
Constant *Zero = ConstantInt::get(Ty, 0);
Constant *One = ConstantInt::get(Ty, 1);
Constant *MinusOne = Constant::getAllOnesValue(Ty);
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
if (!TYPE_UNSIGNED(TREE_TYPE(exp))) {
// Signed case: adjust only when operands have the same sign (the true
// quotient is positive) and LHS is non-zero.
Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
Value *OffsetOne = Builder.CreateAnd(HaveSameSign, LHSNotZero);
Value *Offset = Builder.CreateSelect(OffsetOne, One, Zero);
// SignedOffset = sign(RHS) when adjusting, 0 otherwise (the sext of
// the i1 gives 0 or -1, masked against +/-1).
Value *SignRHS = Builder.CreateSelect(RHSIsPositive, One, MinusOne);
Value *SignedOffset = CastToType(Instruction::SExt, OffsetOne, Ty);
SignedOffset = Builder.CreateAnd(SignRHS, SignedOffset);
Value *CDiv = Builder.CreateSub(LHS, SignedOffset);
CDiv = Builder.CreateSDiv(CDiv, RHS);
return Builder.CreateAdd(CDiv, Offset, "cdiv");
}
// Unsigned case: ceil(LHS/RHS) = (LHS - 1)/RHS + 1 for LHS != 0, and 0
// for LHS == 0; offset folds both into one expression.
Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
Value *Offset = Builder.CreateSelect(LHSNotZero, One, Zero);
Value *CDiv = Builder.CreateSub(LHS, Offset);
CDiv = Builder.CreateUDiv(CDiv, RHS);
return Builder.CreateAdd(CDiv, Offset, "cdiv");
}
/// EmitFLOOR_DIV_EXPR - Emit a division rounding towards negative
/// infinity.  For unsigned operands this is plain udiv; for signed
/// operands the truncated quotient is adjusted down by one exactly when
/// the operands' signs differ and LHS is non-zero.
Value *TreeToLLVM::EmitFLOOR_DIV_EXPR(tree exp) {
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
if (TYPE_UNSIGNED(TREE_TYPE(exp)))
return Builder.CreateUDiv(LHS, RHS, "fdiv");
const Type *Ty = ConvertType(TREE_TYPE(exp));
Constant *Zero = ConstantInt::get(Ty, 0);
Constant *One = ConstantInt::get(Ty, 1);
Constant *MinusOne = Constant::getAllOnesValue(Ty);
// Adjust only when the true quotient is negative (signs differ) and
// not exactly representable (handled by the +/- sign(RHS) pre-bias).
Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
Value *SignsDiffer = Builder.CreateICmpNE(LHSIsPositive, RHSIsPositive);
Value *LHSNotZero = Builder.CreateICmpNE(LHS, Zero);
Value *OffsetOne = Builder.CreateAnd(SignsDiffer, LHSNotZero);
Value *Offset = Builder.CreateSelect(OffsetOne, One, Zero);
// SignedOffset = sign(RHS) when adjusting, 0 otherwise.
Value *SignRHS = Builder.CreateSelect(RHSIsPositive, One, MinusOne);
Value *SignedOffset = CastToType(Instruction::SExt, OffsetOne, Ty);
SignedOffset = Builder.CreateAnd(SignRHS, SignedOffset);
// FDiv = (LHS + sign(RHS)*offset) /trunc RHS - offset.
Value *FDiv = Builder.CreateAdd(LHS, SignedOffset);
FDiv = Builder.CreateSDiv(FDiv, RHS);
return Builder.CreateSub(FDiv, Offset, "fdiv");
}
/// EmitROUND_DIV_EXPR - Emit a division rounding to the nearest integer
/// (halves away from zero).  Signed operands divide their magnitudes with
/// a half-divisor bias and restore the sign; unsigned operands must also
/// cope with overflow of LHS + RHS/2.
Value *TreeToLLVM::EmitROUND_DIV_EXPR(tree exp) {
const Type *Ty = ConvertType(TREE_TYPE(exp));
Constant *Zero = ConstantInt::get(Ty, 0);
Constant *Two = ConstantInt::get(Ty, 2);
Value *LHS = Emit(TREE_OPERAND(exp, 0), 0);
Value *RHS = Emit(TREE_OPERAND(exp, 1), 0);
if (!TYPE_UNSIGNED(TREE_TYPE(exp))) {
// Signed: rdiv = sign * (|LHS| + |RHS|/2) / |RHS|.
Value *LHSIsPositive = Builder.CreateICmpSGE(LHS, Zero);
Value *RHSIsPositive = Builder.CreateICmpSGE(RHS, Zero);
Value *HaveSameSign = Builder.CreateICmpEQ(LHSIsPositive, RHSIsPositive);
Value *MinusLHS = Builder.CreateNeg(LHS);
Value *AbsLHS = Builder.CreateSelect(LHSIsPositive, LHS, MinusLHS,
(LHS->getNameStr()+".abs").c_str());
Value *MinusRHS = Builder.CreateNeg(RHS);
Value *AbsRHS = Builder.CreateSelect(RHSIsPositive, RHS, MinusRHS,
(RHS->getNameStr()+".abs").c_str());
Value *HalfAbsRHS = Builder.CreateUDiv(AbsRHS, Two);
Value *Numerator = Builder.CreateAdd(AbsLHS, HalfAbsRHS);
Value *AbsRDiv = Builder.CreateUDiv(Numerator, AbsRHS);
// Negate the magnitude when the operands' signs differ.
Value *MinusAbsRDiv = Builder.CreateNeg(AbsRDiv);
return Builder.CreateSelect(HaveSameSign, AbsRDiv, MinusAbsRDiv, "rdiv");
}
// Unsigned: LHS + RHS/2 may wrap; if it does, divide the wrapped value
// minus RHS and add one to compensate for the lost divisor's worth.
Value *HalfRHS = Builder.CreateUDiv(RHS, Two);
Value *Numerator = Builder.CreateAdd(LHS, HalfRHS);
Value *Overflowed = Builder.CreateICmpULT(Numerator, HalfRHS);
Value *AltNumerator = Builder.CreateSub(Numerator, RHS);
Numerator = Builder.CreateSelect(Overflowed, AltNumerator, Numerator);
Value *Quotient = Builder.CreateUDiv(Numerator, RHS);
return Builder.CreateAdd(Quotient, CastToUIntType(Overflowed, Ty), "rdiv");
}
/// EmitEXC_PTR_EXPR - Read the current exception pointer and cast it to
/// the expected result type.
Value *TreeToLLVM::EmitEXC_PTR_EXPR(tree exp) {
  CreateExceptionValues();
  Value *ExcPtr = Builder.CreateLoad(ExceptionValue, "eh_value");
  return BitCastToType(ExcPtr, ConvertType(TREE_TYPE(exp)));
}
/// EmitFILTER_EXPR - Read the exception filter value from the selector
/// slot.
Value *TreeToLLVM::EmitFILTER_EXPR(tree exp) {
  CreateExceptionValues();
  Value *Selector = Builder.CreateLoad(ExceptionSelectorValue, "eh_select");
  return Selector;
}
/// EmitRESX_EXPR - Emit a resume-exception: branch to the first reachable
/// handler's post-pad, or to the function's unwind block when the
/// exception must propagate out of the function.
Value *TreeToLLVM::EmitRESX_EXPR(tree exp) {
unsigned RegionNo = TREE_INT_CST_LOW(TREE_OPERAND (exp, 0));
std::vector<struct eh_region *> Handlers;
foreach_reachable_handler(RegionNo, true, AddHandler, &Handlers);
if (!Handlers.empty()) {
// Materialize post-pads for every reachable handler (so later code can
// find them), then branch to the first one.
for (std::vector<struct eh_region *>::iterator I = Handlers.begin(),
E = Handlers.end(); I != E; ++I)
getPostPad(get_eh_region_number(*I));
Builder.CreateBr(getPostPad(get_eh_region_number(*Handlers.begin())));
} else {
assert(can_throw_external_1(RegionNo, true) &&
"Must-not-throw region handled by runtime?");
if (!UnwindBB)
UnwindBB = BasicBlock::Create(Context, "Unwind");
Builder.CreateBr(UnwindBB);
}
// Code after a resume is unreachable; start a fresh block for it.
EmitBlock(BasicBlock::Create(Context, ""));
return 0;
}
#ifndef LLVM_GET_REG_NAME
#define LLVM_GET_REG_NAME(REG_NAME, REG_NUM) reg_names[REG_NUM]
#endif
/// EmitReadOfRegisterVariable - Read the value of a variable pinned to a
/// hardware register by emitting a body-less inline asm with an "={reg}"
/// output constraint.  DestLoc is unused here.
Value *TreeToLLVM::EmitReadOfRegisterVariable(tree decl,
const MemRef *DestLoc) {
const Type *Ty = ConvertType(TREE_TYPE(decl));
// If validation fails (presumably after a diagnostic from
// ValidateRegisterVariable), recover with an undef value.
if (ValidateRegisterVariable(decl)) {
if (Ty->isSingleValueType())
return UndefValue::get(Ty);
return 0; }
// Build "asm with no body, output in the named register".
FunctionType *FTy = FunctionType::get(Ty, std::vector<const Type*>(),false);
const char *Name = extractRegisterName(decl);
int RegNum = decode_reg_name(Name);
Name = LLVM_GET_REG_NAME(Name, RegNum);
InlineAsm *IA = InlineAsm::get(FTy, "", "={"+std::string(Name)+"}", true);
CallInst *Call = Builder.CreateCall(IA);
Call->setDoesNotThrow();
return Call;
}
/// EmitMoveOfRegVariableToRightReg - Force the value computed by I into the
/// hardware register named on var: emit an inline asm consuming the value
/// in that register ("{reg}" input), then a second inline asm reading the
/// register back ("={reg}" output), and return the re-read value.
Value *TreeToLLVM::EmitMoveOfRegVariableToRightReg(Instruction *I, tree var) {
const Type *Ty = I->getType();
// On validation failure, recover with undef (see EmitReadOfRegisterVariable).
if (ValidateRegisterVariable(var)) {
if (Ty->isSingleValueType())
return UndefValue::get(Ty);
return 0; }
std::vector<const Type*> ArgTys;
ArgTys.push_back(Ty);
// First asm: put I's value into the register.
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context),
ArgTys, false);
const char *Name = extractRegisterName(var);
int RegNum = decode_reg_name(Name);
Name = LLVM_GET_REG_NAME(Name, RegNum);
InlineAsm *IA = InlineAsm::get(FTy, "", "{"+std::string(Name)+"}",
true);
CallInst *Call = Builder.CreateCall(IA, I);
Call->setDoesNotThrow();
// Second asm: read the register back so the SSA value is tied to it.
FunctionType *FTy2 = FunctionType::get(Ty, std::vector<const Type*>(),
false);
InlineAsm *IA2 = InlineAsm::get(FTy2, "", "={"+std::string(Name)+"}",
true);
CallInst *Call2 = Builder.CreateCall(IA2);
Call2->setDoesNotThrow();
return Call2;
}
/// EmitModifyOfRegisterVariable - Store RHS into a variable pinned to a
/// hardware register, by emitting a body-less inline asm that takes RHS as
/// a "{reg}" input.
void TreeToLLVM::EmitModifyOfRegisterVariable(tree decl, Value *RHS) {
// Silently drop the store if the register variable did not validate.
if (ValidateRegisterVariable(decl))
return;
std::vector<const Type*> ArgTys;
const Type* Ty = ConvertType(TREE_TYPE(decl));
ArgTys.push_back(Ty);
FunctionType *FTy = FunctionType::get(Type::getVoidTy(Context), ArgTys, false);
const char *Name = extractRegisterName(decl);
int RegNum = decode_reg_name(Name);
Name = LLVM_GET_REG_NAME(Name, RegNum);
InlineAsm *IA = InlineAsm::get(FTy, "", "{"+std::string(Name)+"}", true);
CallInst *Call = Builder.CreateCall(IA, RHS);
Call->setDoesNotThrow();
}
/// ConvertInlineAsmStr - Convert a GCC inline asm template (which uses
/// %-style operand references and escapes) into LLVM inline asm string form
/// (which uses $-style references).  NumOperands is the total number of
/// output + input operands, used to range-check explicit "%<letter><num>"
/// references.  Errors are reported via error()/output_operand_lossage().
static std::string ConvertInlineAsmStr(tree exp, unsigned NumOperands) {
  tree str = ASM_STRING(exp);
  if (TREE_CODE(str) == ADDR_EXPR) str = TREE_OPERAND(str, 0);

  // A "simple" asm (ASM_INPUT_P) has no operands; only '$' needs quoting.
  if (ASM_INPUT_P(exp)) {
    const char *InStr = TREE_STRING_POINTER(str);
    std::string Result;
    while (1) {
      switch (*InStr++) {
      case 0: return Result;                  // NUL terminator: done.
      default: Result += InStr[-1]; break;    // Ordinary character.
      case '$': Result += "$$"; break;        // Escape literal '$' for LLVM.
      }
    }
  }

  // Expand [name] style operand references into numeric ones first.
  str = resolve_asm_operand_names(str, ASM_OUTPUTS(exp), ASM_INPUTS(exp));
  const char *InStr = TREE_STRING_POINTER(str);
  std::string Result;
  while (1) {
    switch (*InStr++) {
    case 0: return Result;                    // NUL terminator: done.
    default: Result += InStr[-1]; break;      // Ordinary character.
    case '$': Result += "$$"; break;          // Escape literal '$'.
#ifdef ASSEMBLER_DIALECT
    // Assembler dialect markers become LLVM's $(, $), $| forms.
    case '{': Result += "$("; break;
    case '}': Result += "$)"; break;
    case '|': Result += "$|"; break;
#endif
    case '%': {
      char EscapedChar = *InStr++;
      if (EscapedChar == 0) {
        // The template ended with a bare '%'.  Diagnose it and stop rather
        // than reading past the terminating NUL on the next iteration.
        output_operand_lossage("invalid %%-code");
        return Result;
      }
      if (EscapedChar == '%') {               // "%%" -> literal '%'.
        Result += '%';
      } else if (EscapedChar == '=') {        // "%=" -> unique instance id.
        Result += "${:uid}";
      }
#ifdef LLVM_ASM_EXTENSIONS
      LLVM_ASM_EXTENSIONS(EscapedChar, InStr, Result)
#endif
      else if (ISALPHA(EscapedChar)) {
        // "%<letter><number>": operand reference with a print modifier.
        char *EndPtr;
        unsigned long OpNum = strtoul(InStr, &EndPtr, 10);
        if (InStr == EndPtr) {
          error("%Hoperand number missing after %%-letter",&EXPR_LOCATION(exp));
          return Result;
        } else if (OpNum >= NumOperands) {
          error("%Hoperand number out of range", &EXPR_LOCATION(exp));
          return Result;
        }
        Result += "${" + utostr(OpNum) + ":" + EscapedChar + "}";
        InStr = EndPtr;
      } else if (ISDIGIT(EscapedChar)) {
        // "%<number>": plain operand reference (re-parse from the digit).
        char *EndPtr;
        unsigned long OpNum = strtoul(InStr-1, &EndPtr, 10);
        InStr = EndPtr;
        Result += "$" + utostr(OpNum);
#ifdef PRINT_OPERAND_PUNCT_VALID_P
      } else if (PRINT_OPERAND_PUNCT_VALID_P((unsigned char)EscapedChar)) {
        // Target-specific punctuation code, e.g. "%*".
        Result += "${:";
        Result += EscapedChar;
        Result += "}";
#endif
      } else {
        output_operand_lossage("invalid %%-code");
      }
      break;
    }
    }
  }
}
/// CanonicalizeConstraint - Convert one GCC constraint alternative into
/// LLVM constraint syntax: strip or copy leading modifiers, expand 'g' to
/// "imr", map 'p' to 'r', and rewrite any constraint letter whose register
/// class contains exactly one register as an explicit "{regname}".
static std::string CanonicalizeConstraint(const char *Constraint) {
std::string Result;
// First consume any leading modifier characters.
bool DoneModifiers = false;
while (!DoneModifiers) {
switch (*Constraint) {
default: DoneModifiers = true; break;
case '=': assert(0 && "Should be after '='s");
case '+': assert(0 && "'+' should already be expanded");
case '*':
case '?':
case '!':
++Constraint;
break;
case '&': case '%': Result += *Constraint++;
break;
case '#': return Result;
}
}
// Then translate each constraint letter in turn.
while (*Constraint) {
char ConstraintChar = *Constraint++;
if (ConstraintChar == 'g') {
// 'g' = general operand: immediate, memory or register.
Result += "imr";
continue;
}
// 'p' (valid address) is treated as a register constraint.
if (ConstraintChar == 'p')
ConstraintChar = 'r';
unsigned RegClass;
if (ConstraintChar == 'r')
RegClass = GENERAL_REGS;
else
RegClass = REG_CLASS_FROM_CONSTRAINT(Constraint[-1], Constraint-1);
if (RegClass == NO_REGS) { Result += ConstraintChar;
continue;
}
// Cache per register class whether it has exactly one member; -1 means
// "not a single-register class".
static std::map<unsigned, int> AnalyzedRegClasses;
std::map<unsigned, int>::iterator I =
AnalyzedRegClasses.lower_bound(RegClass);
int RegMember;
if (I != AnalyzedRegClasses.end() && I->first == RegClass) {
RegMember = I->second;
} else {
// Scan the class; keep the register only if it is the sole member.
RegMember = -1; for (unsigned j = 0; j != FIRST_PSEUDO_REGISTER; ++j)
if (TEST_HARD_REG_BIT(reg_class_contents[RegClass], j)) {
if (RegMember == -1) {
RegMember = j;
} else {
RegMember = -1;
break;
}
}
AnalyzedRegClasses.insert(I, std::make_pair(RegClass, RegMember));
}
if (RegMember != -1) {
// Single-register class: name the register explicitly.
Result += '{';
Result += reg_names[RegMember];
Result += '}';
} else {
Result += ConstraintChar;
}
}
return Result;
}
/// MatchWeight - Score how well the first alternative of Constraint suits
/// Operand: 1 = good match, 0 = neutral, -1 = impossible.  Only operands
/// that are hard-register variables or integer constants are scored; all
/// other operands keep weight 0.
static int MatchWeight(const char *Constraint, tree Operand) {
const char *p = Constraint;
int RetVal = 0;
// Hard-register variable: match iff some constraint letter names a
// register class that contains that register.
if (TREE_CODE(Operand) == VAR_DECL && DECL_HARD_REGISTER(Operand)) {
int RegNum = decode_reg_name(extractRegisterName(Operand));
RetVal = -1;
if (RegNum >= 0) {
do {
unsigned RegClass;
if (*p == 'r')
RegClass = GENERAL_REGS;
else
RegClass = REG_CLASS_FROM_CONSTRAINT(*p, p);
if (RegClass != NO_REGS &&
TEST_HARD_REG_BIT(reg_class_contents[RegClass], RegNum)) {
RetVal = 1;
break;
}
++p;
} while (*p != ',' && *p != 0);
}
}
// Integer constant: 'i'/'n' is a match; any letter other than the memory
// constraints 'm'/'o'/'V' resets the weight to neutral.  Note the weight
// is re-evaluated per character, so the last letter examined decides.
if (TREE_CODE(Operand) == INTEGER_CST) {
do {
RetVal = -1;
if (*p == 'i' || *p == 'n') { RetVal = 1;
break;
}
if (*p != 'm' && *p != 'o' && *p != 'V') RetVal = 0;
++p;
} while (*p != ',' && *p != 0);
}
return RetVal;
}
/// ChooseConstraintTuple - For multi-alternative constraints ("r,m", etc.),
/// pick the alternative (comma-separated column) with the highest total
/// MatchWeight over all operands, then rewrite each Constraints[] entry in
/// place to contain only that alternative.  The freshly xmalloc'd strings
/// are also returned through ReplacementStrings so the caller can release
/// them with FreeConstTupleStrings.
static void
ChooseConstraintTuple (const char **Constraints, tree exp, unsigned NumInputs,
unsigned NumOutputs, unsigned NumChoices,
const char **ReplacementStrings)
{
int MaxWeight = -1;
unsigned int CommasToSkip = 0;
int *Weights = (int *)alloca(NumChoices * sizeof(int));
// RunningConstraints[j] walks operand j's constraint string, advancing one
// alternative per outer iteration.
const char** RunningConstraints =
(const char**)alloca((NumInputs+NumOutputs)*sizeof(const char*));
memcpy(RunningConstraints, Constraints,
(NumInputs+NumOutputs) * sizeof(const char*));
// Score every alternative i; a -1 from MatchWeight poisons the whole tuple.
for (unsigned int i=0; i<NumChoices; i++) {
Weights[i] = 0;
unsigned int j = 0;
for (tree Output = ASM_OUTPUTS(exp); j<NumOutputs;
j++, Output = TREE_CHAIN(Output)) {
// On the first pass, skip the leading '=' of each output constraint.
if (i==0)
RunningConstraints[j]++; const char* p = RunningConstraints[j];
// Skip modifier characters before weighing.
while (*p=='*' || *p=='&' || *p=='%') p++;
if (Weights[i] != -1) {
int w = MatchWeight(p, TREE_VALUE(Output));
if (w < 0)
Weights[i] = -1;
else
Weights[i] += w;
}
// Advance past this alternative (and the following comma + modifiers).
while (*p!=0 && *p!=',')
p++;
if (*p!=0) {
p++; while (*p=='*' || *p=='&' || *p=='%')
p++; }
RunningConstraints[j] = p;
}
assert(j==NumOutputs);
for (tree Input = ASM_INPUTS(exp); j<NumInputs+NumOutputs;
j++, Input = TREE_CHAIN(Input)) {
const char* p = RunningConstraints[j];
if (Weights[i] != -1) {
int w = MatchWeight(p, TREE_VALUE(Input));
if (w < 0)
Weights[i] = -1; else
Weights[i] += w;
}
while (*p!=0 && *p!=',')
p++;
if (*p!=0)
p++;
RunningConstraints[j] = p;
}
// Track the best-scoring alternative; ties keep the earliest.
if (Weights[i]>MaxWeight) {
CommasToSkip = i;
MaxWeight = Weights[i];
}
}
// Rebuild each constraint string with only the chosen alternative; output
// constraints keep their original leading '='.
for (unsigned int i=0; i<NumInputs+NumOutputs; i++) {
assert(*(RunningConstraints[i])==0); const char* start = Constraints[i];
if (i<NumOutputs)
start++; const char* end = start;
while (*end != ',' && *end != 0)
end++;
// Step over CommasToSkip alternatives to reach the chosen one.
for (unsigned int j=0; j<CommasToSkip; j++) {
start = end+1;
end = start;
while (*end != ',' && *end != 0)
end++;
}
char *newstring;
if (i<NumOutputs) {
newstring = (char *)xmalloc(end-start+1+1);
newstring[0] = *(Constraints[i]);
strncpy(newstring+1, start, end-start);
newstring[end-start+1] = 0;
} else {
newstring = (char *)xmalloc(end-start+1);
strncpy(newstring, start, end-start);
newstring[end-start] = 0;
}
Constraints[i] = (const char *)newstring;
ReplacementStrings[i] = (const char*)newstring;
}
}
/// FreeConstTupleStrings - Release the Size xmalloc'd constraint strings
/// handed back by ChooseConstraintTuple.
static void FreeConstTupleStrings(const char **ReplacementStrings,
                                  unsigned int Size) {
  const char **End = ReplacementStrings + Size;
  while (ReplacementStrings != End) {
    free(const_cast<char *>(*ReplacementStrings));
    ++ReplacementStrings;
  }
}
/// EmitASM_EXPR - Convert a GCC inline asm statement (ASM_EXPR) into an
/// LLVM inline asm call: parse the output/input/clobber constraint lists,
/// build the LLVM constraint string and call operands, emit the call, and
/// store register results back to their destinations.  Returns 0 (an asm
/// statement produces no value).
Value *TreeToLLVM::EmitASM_EXPR(tree exp) {
unsigned NumInputs = list_length(ASM_INPUTS(exp));
unsigned NumOutputs = list_length(ASM_OUTPUTS(exp));
unsigned NumInOut = 0;
// Count the comma-separated constraint alternatives; every operand must
// have the same number of alternatives.
unsigned NumChoices = 0; const char* p;
for (tree t = ASM_INPUTS(exp); t; t = TREE_CHAIN(t)) {
unsigned NumInputChoices = 1;
for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
if (*p == ',')
NumInputChoices++;
}
if (NumChoices==0)
NumChoices = NumInputChoices;
else if (NumChoices != NumInputChoices)
abort(); }
for (tree t = ASM_OUTPUTS(exp); t; t = TREE_CHAIN(t)) {
unsigned NumOutputChoices = 1;
for (p = TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(t))); *p; p++) {
if (*p == ',')
NumOutputChoices++;
}
if (NumChoices==0)
NumChoices = NumOutputChoices;
else if (NumChoices != NumOutputChoices)
abort(); }
// Gather the raw constraint strings: outputs first, then inputs.
const char **Constraints =
(const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
int ValNum = 0;
for (tree Output = ASM_OUTPUTS(exp); Output;
Output = TREE_CHAIN(Output), ++ValNum) {
tree Operand = TREE_VALUE(Output);
tree type = TREE_TYPE(Operand);
// Bail silently on types that already failed to parse.
if (type == error_mark_node) return 0;
const char *Constraint =
TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Output)));
Constraints[ValNum] = Constraint;
}
for (tree Input = ASM_INPUTS(exp); Input; Input = TREE_CHAIN(Input),++ValNum){
tree Val = TREE_VALUE(Input);
tree type = TREE_TYPE(Val);
if (type == error_mark_node) return 0;
const char *Constraint =
TREE_STRING_POINTER(TREE_VALUE(TREE_PURPOSE(Input)));
Constraints[ValNum] = Constraint;
}
// If there are multiple alternatives, pick one up front and rewrite the
// constraint strings to contain just that alternative (LLVM inline asm
// does not support multiple alternatives).
const char** ReplacementStrings = 0;
if (NumChoices>1) {
ReplacementStrings =
(const char **)alloca((NumOutputs + NumInputs) * sizeof(const char *));
ChooseConstraintTuple(Constraints, exp, NumInputs, NumOutputs, NumChoices,
ReplacementStrings);
}
std::vector<Value*> CallOps;
std::vector<const Type*> CallArgTypes;
std::string NewAsmStr = ConvertInlineAsmStr(exp, NumOutputs+NumInputs);
std::string ConstraintStr;
bool HasSideEffects = ASM_VOLATILE_P(exp) || !ASM_OUTPUTS(exp);
// StoreCallResultAddrs - addresses to store direct (register) results to.
SmallVector<Value *, 4> StoreCallResultAddrs;
// CallResultTypes - the LLVM types of the direct results.
SmallVector<const Type *, 4> CallResultTypes;
SmallVector<bool, 4> CallResultIsSigned;
// OutputLocations - for each output: (is-direct-result, index into either
// CallResultTypes or CallArgTypes).  Used to resolve matching constraints.
SmallVector<std::pair<bool, unsigned>, 4> OutputLocations;
// Process the outputs.
ValNum = 0;
for (tree Output = ASM_OUTPUTS(exp); Output;
Output = TREE_CHAIN(Output), ++ValNum) {
tree Operand = TREE_VALUE(Output);
const char *Constraint = Constraints[ValNum];
bool IsInOut, AllowsReg, AllowsMem;
if (!parse_output_constraint(&Constraint, ValNum, NumInputs, NumOutputs,
&AllowsMem, &AllowsReg, &IsInOut)) {
if (NumChoices>1)
FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
return 0;
}
assert(Constraint[0] == '=' && "Not an output constraint?");
if (!AllowsReg && (AllowsMem || IsInOut))
lang_hooks.mark_addressable(Operand);
// An in-out ("+") operand counts as an extra input as well.
if (IsInOut)
++NumInOut, ++NumInputs;
std::string SimplifiedConstraint;
if (TREE_CODE(Operand) == VAR_DECL && DECL_HARD_REGISTER(Operand)) {
// Output to a register variable: pin the result to that register.
const char* RegName = extractRegisterName(Operand);
int RegNum = decode_reg_name(RegName);
if (RegNum >= 0) {
RegName = LLVM_GET_REG_NAME(RegName, RegNum);
unsigned RegNameLen = strlen(RegName);
char *NewConstraint = (char*)alloca(RegNameLen+4);
NewConstraint[0] = '=';
NewConstraint[1] = '{';
memcpy(NewConstraint+2, RegName, RegNameLen);
NewConstraint[RegNameLen+2] = '}';
NewConstraint[RegNameLen+3] = 0;
SimplifiedConstraint = NewConstraint;
// A write to a named register must not be dead-code eliminated.
HasSideEffects = true;
AllowsMem = false;
} else {
SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
}
} else {
SimplifiedConstraint = CanonicalizeConstraint(Constraint+1);
}
LValue Dest = EmitLV(Operand);
const Type *DestValTy =
cast<PointerType>(Dest.Ptr->getType())->getElementType();
assert(!Dest.isBitfield() && "Cannot assign into a bitfield!");
if (!AllowsMem && DestValTy->isSingleValueType()) { StoreCallResultAddrs.push_back(Dest.Ptr);
// Direct register output: the asm call returns it; store it afterwards.
ConstraintStr += ",=";
ConstraintStr += SimplifiedConstraint;
CallResultTypes.push_back(DestValTy);
CallResultIsSigned.push_back(!TYPE_UNSIGNED(TREE_TYPE(Operand)));
OutputLocations.push_back(std::make_pair(true, CallResultTypes.size()-1));
} else {
// Indirect (memory) output: pass the address as a call operand.
ConstraintStr += ",=*";
ConstraintStr += SimplifiedConstraint;
CallOps.push_back(Dest.Ptr);
CallArgTypes.push_back(Dest.Ptr->getType());
OutputLocations.push_back(std::make_pair(false, CallArgTypes.size()-1));
}
}
// Process the inputs.
for (tree Input = ASM_INPUTS(exp); Input; Input = TREE_CHAIN(Input),++ValNum){
tree Val = TREE_VALUE(Input);
tree type = TREE_TYPE(Val);
const char *Constraint = Constraints[ValNum];
bool AllowsReg, AllowsMem;
if (!parse_input_constraint(Constraints+ValNum, ValNum-NumOutputs,
NumInputs, NumOutputs, NumInOut,
Constraints, &AllowsMem, &AllowsReg)) {
if (NumChoices>1)
FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
return 0;
}
bool isIndirect = false;
if (AllowsReg || !AllowsMem) { const Type *LLVMTy = ConvertType(type);
Value *Op = 0;
if (LLVMTy->isSingleValueType()) {
// &&label operands become the label's basic block.
if (TREE_CODE(Val)==ADDR_EXPR &&
TREE_CODE(TREE_OPERAND(Val,0))==LABEL_DECL) {
Op = getLabelDeclBlock(TREE_OPERAND(Val, 0));
} else
Op = Emit(Val, 0);
} else {
// Aggregate in a register: load it as an integer of matching width
// if a power-of-two size permits, otherwise fall back to memory.
LValue LV = EmitLV(Val);
assert(!LV.isBitfield() && "Inline asm can't have bitfield operand");
uint64_t TySize = TD.getTypeSizeInBits(LLVMTy);
if (TySize == 1 || TySize == 8 || TySize == 16 ||
TySize == 32 || TySize == 64 || (TySize == 128 && !AllowsMem)) {
LLVMTy = IntegerType::get(Context, TySize);
Op = Builder.CreateLoad(BitCastToType(LV.Ptr,
LLVMTy->getPointerTo()));
} else {
if (!AllowsMem)
error("%Haggregate does not match inline asm register constraint",
&EXPR_LOCATION(exp));
isIndirect = true;
Op = LV.Ptr;
}
}
const Type *OpTy = Op->getType();
// A digit constraint ties this input to an output operand; the two must
// end up with the same LLVM type.
if (ISDIGIT(Constraint[0])) {
unsigned Match = atoi(Constraint);
const Type *OTy = 0;
if (Match < OutputLocations.size()) {
if (OutputLocations[Match].first)
OTy = CallResultTypes[OutputLocations[Match].second];
else {
OTy = CallArgTypes[OutputLocations[Match].second];
assert(OTy->isPointerTy() && "Expected pointer type!");
OTy = cast<PointerType>(OTy)->getElementType();
}
}
if (OTy && OTy != OpTy) {
// Only integer/pointer mismatches can be reconciled by widening.
if (!(OTy->isIntegerTy() || OTy->isPointerTy()) ||
!(OpTy->isIntegerTy() || OpTy->isPointerTy())) {
error("%Hunsupported inline asm: input constraint with a matching "
"output constraint of incompatible type!",
&EXPR_LOCATION(exp));
if (NumChoices>1)
FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
return 0;
}
unsigned OTyBits = TD.getTypeSizeInBits(OTy);
unsigned OpTyBits = TD.getTypeSizeInBits(OpTy);
if (OTyBits == 0 || OpTyBits == 0 || OTyBits < OpTyBits) {
error("%Hunsupported inline asm: input constraint with a matching "
"output constraint of incompatible type!",
&EXPR_LOCATION(exp));
return 0;
} else if (OTyBits > OpTyBits) {
// Widen the input to the output's width; on big-endian targets
// shift so the value lands in the expected part of the register.
Op = CastToAnyType(Op, !TYPE_UNSIGNED(type),
OTy, CallResultIsSigned[Match]);
if (BYTES_BIG_ENDIAN) {
Constant *ShAmt = ConstantInt::get(Op->getType(),
OTyBits-OpTyBits);
Op = Builder.CreateLShr(Op, ShAmt);
}
OpTy = Op->getType();
}
}
}
CallOps.push_back(Op);
CallArgTypes.push_back(OpTy);
} else { lang_hooks.mark_addressable(TREE_VALUE(Input));
// Memory input: pass its address.
isIndirect = true;
LValue Src = EmitLV(Val);
assert(!Src.isBitfield() && "Cannot read from a bitfield!");
CallOps.push_back(Src.Ptr);
CallArgTypes.push_back(Src.Ptr->getType());
}
ConstraintStr += ',';
if (isIndirect)
ConstraintStr += '*';
// Input tied to a register variable: name the register directly.
if (TREE_CODE(Val) == VAR_DECL && DECL_HARD_REGISTER(Val)) {
const char *RegName = extractRegisterName(Val);
int RegNum = decode_reg_name(RegName);
if (RegNum >= 0) {
RegName = LLVM_GET_REG_NAME(RegName, RegNum);
ConstraintStr += '{';
ConstraintStr += RegName;
ConstraintStr += '}';
continue;
}
}
std::string Simplified = CanonicalizeConstraint(Constraint);
ConstraintStr += Simplified;
}
assert(ASM_USES(exp)==0);
// Process clobbers, letting the target add implicit ones.
tree Clobbers = targetm.md_asm_clobbers(ASM_OUTPUTS(exp), ASM_INPUTS(exp),
ASM_CLOBBERS(exp));
for (; Clobbers; Clobbers = TREE_CHAIN(Clobbers)) {
const char *RegName = TREE_STRING_POINTER(TREE_VALUE(Clobbers));
int RegCode = decode_reg_name(RegName);
switch (RegCode) {
case -1: case -2: error("%Hunknown register name %qs in %<asm%>", &EXPR_LOCATION(exp),
RegName);
if (NumChoices>1)
FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
return 0;
case -3: ConstraintStr += ",~{cc}";
break;
case -4: ConstraintStr += ",~{memory}";
break;
default: RegName = LLVM_GET_REG_NAME(RegName, RegCode);
ConstraintStr += ",~{";
ConstraintStr += RegName;
ConstraintStr += "}";
break;
}
}
// Compute the return type: void, a scalar, or a struct of the results.
const Type *CallResultType;
switch (CallResultTypes.size()) {
case 0: CallResultType = Type::getVoidTy(Context); break;
case 1: CallResultType = CallResultTypes[0]; break;
default:
std::vector<const Type*> TmpVec(CallResultTypes.begin(),
CallResultTypes.end());
CallResultType = StructType::get(Context, TmpVec);
break;
}
const FunctionType *FTy =
FunctionType::get(CallResultType, CallArgTypes, false);
// Strip the leading comma that each append above inserted.
if (!ConstraintStr.empty())
ConstraintStr.erase(ConstraintStr.begin());
if (!InlineAsm::Verify(FTy, ConstraintStr)) {
error("%HInvalid or unsupported inline assembly!", &EXPR_LOCATION(exp));
if (NumChoices>1)
FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
return 0;
}
Value *Asm = InlineAsm::get(FTy, NewAsmStr, ConstraintStr,
HasSideEffects, ASM_ASM_BLOCK(exp));
CallInst *CV = Builder.CreateCall(Asm, CallOps.begin(), CallOps.end(),
CallResultTypes.empty() ? "" : "asmtmp");
CV->setDoesNotThrow();
// Store direct results back: a single result is the call value itself; a
// multi-result asm returns a struct that must be unpacked.
if (StoreCallResultAddrs.size() == 1)
Builder.CreateStore(CV, StoreCallResultAddrs[0]);
else if (unsigned NumResults = StoreCallResultAddrs.size()) {
for (unsigned i = 0; i != NumResults; ++i) {
Value *ValI = Builder.CreateExtractValue(CV, i, "asmresult");
Builder.CreateStore(ValI, StoreCallResultAddrs[i]);
}
}
// Give the target a chance to pattern-match the whole asm (e.g. bswap).
if (const TargetLowering *TLI = TheTarget->getTargetLowering())
TLI->ExpandInlineAsm(CV);
if (NumChoices>1)
FreeConstTupleStrings(ReplacementStrings, NumInputs+NumOutputs);
return 0;
}
/// BuildVector - Assemble an LLVM vector value from the given scalar
/// operands.  If every operand is a Constant the result folds to a constant
/// vector; otherwise the vector is built with insertelement instructions
/// starting from undef.  The operand count must be a power of two.
Value *TreeToLLVM::BuildVector(const std::vector<Value*> &Ops) {
  assert((Ops.size() & (Ops.size()-1)) == 0 &&
         "Not a power-of-two sized vector!");
  // See whether every element is a constant.
  bool IsConstantVec = true;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (!isa<Constant>(Ops[i])) {
      IsConstantVec = false;
      break;
    }
  if (IsConstantVec) {
    // All constant: fold directly to a ConstantVector.
    std::vector<Constant*> Elts;
    Elts.reserve(Ops.size());
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      Elts.push_back(cast<Constant>(Ops[i]));
    return ConstantVector::get(Elts);
  }
  // Otherwise emit a chain of insertelement instructions.
  Value *Vec = UndefValue::get(VectorType::get(Ops[0]->getType(), Ops.size()));
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Vec = Builder.CreateInsertElement(Vec, Ops[i],
                          ConstantInt::get(Type::getInt32Ty(Context), i));
  return Vec;
}
/// BuildVector - Varargs convenience form: build a vector from Elt and the
/// following Value* arguments.  The argument list must be terminated with a
/// null Value*.
Value *TreeToLLVM::BuildVector(Value *Elt, ...) {
std::vector<Value*> Ops;
va_list VA;
va_start(VA, Elt);
Ops.push_back(Elt);
// Collect arguments until the null terminator.
while (Value *Arg = va_arg(VA, Value *))
Ops.push_back(Arg);
va_end(VA);
return BuildVector(Ops);
}
/// BuildVectorShuffle - Emit a shufflevector of the two (same-typed) input
/// vectors.  The varargs are NumElements ints choosing source elements in
/// the range [0, 2*NumElements); an index of -1 yields undef for that
/// result element.
Value *TreeToLLVM::BuildVectorShuffle(Value *InVec1, Value *InVec2, ...) {
assert(InVec1->getType()->isVectorTy() &&
InVec1->getType() == InVec2->getType() && "Invalid shuffle!");
unsigned NumElements = cast<VectorType>(InVec1->getType())->getNumElements();
// Build the shuffle mask from the vararg indices.
std::vector<Constant*> Idxs;
va_list VA;
va_start(VA, InVec2);
for (unsigned i = 0; i != NumElements; ++i) {
int idx = va_arg(VA, int);
if (idx == -1)
Idxs.push_back(UndefValue::get(Type::getInt32Ty(Context)));
else {
assert((unsigned)idx < 2*NumElements && "Element index out of range!");
Idxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), idx));
}
}
va_end(VA);
return Builder.CreateShuffleVector(InVec1, InVec2,
ConstantVector::get(Idxs));
}
/// EmitFrontendExpandedBuiltinCall - Give the target a chance to lower a
/// machine-specific builtin call itself.  Returns true if the call was
/// handled (with the value, if any, in Result).  When the target defines
/// no LLVM_TARGET_INTRINSIC_LOWER hook this is a no-op returning false.
bool TreeToLLVM::EmitFrontendExpandedBuiltinCall(tree exp, tree fndecl,
const MemRef *DestLoc,
Value *&Result) {
#ifdef LLVM_TARGET_INTRINSIC_LOWER
const Type *ResultType = ConvertType(TREE_TYPE(TREE_TYPE(fndecl)));
// Emit the call arguments; aggregate arguments go through a temporary
// and are passed by value as the loaded contents.
std::vector<Value*> Operands;
for (tree Op = TREE_OPERAND(exp, 1); Op; Op = TREE_CHAIN(Op)) {
tree OpVal = TREE_VALUE(Op);
if (isAggregateTreeType(TREE_TYPE(OpVal))) {
MemRef OpLoc = CreateTempLoc(ConvertType(TREE_TYPE(OpVal)));
Emit(OpVal, &OpLoc);
Operands.push_back(Builder.CreateLoad(OpLoc.Ptr));
} else {
Operands.push_back(Emit(OpVal, NULL));
}
}
unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
return LLVM_TARGET_INTRINSIC_LOWER(exp, FnCode, DestLoc, Result, ResultType,
Operands);
#endif
// Reached only when LLVM_TARGET_INTRINSIC_LOWER is not defined.
return false;
}
// TargetBuiltinCache - Cached intrinsic declarations for target-specific
// (BUILT_IN_MD) builtins, indexed by DECL_FUNCTION_CODE.
static std::vector<Constant*> TargetBuiltinCache;
/// clearTargetBuiltinCache - Drop all cached target builtin declarations;
/// they reference a module that may no longer be valid.
void clearTargetBuiltinCache() {
TargetBuiltinCache.clear();
}
/// EmitMemoryBarrier - Emit an llvm.memory.barrier intrinsic call.  The
/// flags select which orderings are enforced: load-load (ll), load-store
/// (ls), store-load (sl), store-store (ss), and whether device memory is
/// included (device).
void TreeToLLVM::EmitMemoryBarrier(bool ll, bool ls, bool sl, bool ss,
bool device) {
Value* C[5];
C[0] = ConstantInt::get(Type::getInt1Ty(Context), ll);
C[1] = ConstantInt::get(Type::getInt1Ty(Context), ls);
C[2] = ConstantInt::get(Type::getInt1Ty(Context), sl);
C[3] = ConstantInt::get(Type::getInt1Ty(Context), ss);
C[4] = ConstantInt::get(Type::getInt1Ty(Context), device);
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memory_barrier),
C, C + 5);
}
/// BuildBinaryAtomicBuiltin - Expand a two-argument __sync_* builtin
/// (pointer, value) to the given atomic intrinsic, bracketing the operation
/// with full memory barriers.
Value *
TreeToLLVM::BuildBinaryAtomicBuiltin(tree exp, Intrinsic::ID id) {
const Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
// C[0] is the target address, C[1] the operand value.
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
const Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
// ARM/Darwin cannot order device memory in the barrier; others use a
// fully general barrier.
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Value *Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, id,
Ty, 2),
C, C + 2);
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
// NOTE(review): CreateIntToPtr presumably relies on ResultTy and the
// intrinsic's integer result lining up — confirm this handles integer
// result types as intended.
Result = Builder.CreateIntToPtr(Result, ResultTy);
return Result;
}
/// BuildCmpAndSwapAtomicBuiltin - Expand __sync_{bool,val}_compare_and_swap
/// to the atomic.cmp.swap intrinsic, bracketed by full memory barriers.
/// With isBool the result is "did the swap happen" (old value == expected),
/// otherwise the old value itself is returned.
Value *
TreeToLLVM::BuildCmpAndSwapAtomicBuiltin(tree exp, tree type, bool isBool) {
const Type *ResultTy = ConvertType(type);
tree arglist = TREE_OPERAND(exp, 1);
// C[0] is the address, C[1] the expected value, C[2] the new value.
Value* C[3] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0),
Emit(TREE_VALUE(TREE_CHAIN(TREE_CHAIN(arglist))), 0)
};
const Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
C[2] = Builder.CreateIntCast(C[2], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
// ARM/Darwin cannot order device memory in the barrier (see
// BuildBinaryAtomicBuiltin).
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Value *Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::atomic_cmp_swap,
Ty, 2),
C, C + 3);
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
if (isBool)
// Boolean form: success iff the returned old value equals the expected.
Result = CastToUIntType(Builder.CreateICmpEQ(Result, C[1]),
ConvertType(boolean_type_node));
else
Result = Builder.CreateIntToPtr(Result, ResultTy);
return Result;
}
bool TreeToLLVM::EmitBuiltinCall(tree exp, tree fndecl,
const MemRef *DestLoc, Value *&Result) {
if (DECL_BUILT_IN_CLASS(fndecl) == BUILT_IN_MD) {
unsigned FnCode = DECL_FUNCTION_CODE(fndecl);
if (TargetBuiltinCache.size() <= FnCode)
TargetBuiltinCache.resize(FnCode+1);
if (TargetBuiltinCache[FnCode] == 0) {
const char *TargetPrefix = "";
#ifdef LLVM_TARGET_INTRINSIC_PREFIX
TargetPrefix = LLVM_TARGET_INTRINSIC_PREFIX;
#endif
if (EmitFrontendExpandedBuiltinCall(exp, fndecl, DestLoc, Result))
return true;
const char *BuiltinName = IDENTIFIER_POINTER(DECL_NAME(fndecl));
Intrinsic::ID IntrinsicID =
Intrinsic::getIntrinsicForGCCBuiltin(TargetPrefix, BuiltinName);
if (IntrinsicID == Intrinsic::not_intrinsic) {
error("%Hunsupported target builtin %<%s%> used", &EXPR_LOCATION(exp),
BuiltinName);
const Type *ResTy = ConvertType(TREE_TYPE(exp));
if (ResTy->isSingleValueType())
Result = UndefValue::get(ResTy);
return true;
}
TargetBuiltinCache[FnCode] =
Intrinsic::getDeclaration(TheModule, IntrinsicID);
}
Result = EmitCallOf(TargetBuiltinCache[FnCode], exp, DestLoc,
AttrListPtr());
return true;
}
enum built_in_function fcode = DECL_FUNCTION_CODE(fndecl);
switch (fcode) {
default: return false;
case BUILT_IN_VA_START:
case BUILT_IN_STDARG_START: return EmitBuiltinVAStart(exp);
case BUILT_IN_VA_END: return EmitBuiltinVAEnd(exp);
case BUILT_IN_VA_COPY: return EmitBuiltinVACopy(exp);
case BUILT_IN_CONSTANT_P: return EmitBuiltinConstantP(exp, Result);
case BUILT_IN_ALLOCA: return EmitBuiltinAlloca(exp, Result);
case BUILT_IN_EXTEND_POINTER: return EmitBuiltinExtendPointer(exp, Result);
case BUILT_IN_EXPECT: return EmitBuiltinExpect(exp, DestLoc, Result);
case BUILT_IN_MEMCPY: return EmitBuiltinMemCopy(exp, Result,
false, false);
case BUILT_IN_MEMCPY_CHK: return EmitBuiltinMemCopy(exp, Result,
false, true);
case BUILT_IN_MEMMOVE: return EmitBuiltinMemCopy(exp, Result,
true, false);
case BUILT_IN_MEMMOVE_CHK: return EmitBuiltinMemCopy(exp, Result,
true, true);
case BUILT_IN_MEMSET: return EmitBuiltinMemSet(exp, Result, false);
case BUILT_IN_MEMSET_CHK: return EmitBuiltinMemSet(exp, Result, true);
case BUILT_IN_BZERO: return EmitBuiltinBZero(exp, Result);
case BUILT_IN_PREFETCH: return EmitBuiltinPrefetch(exp);
case BUILT_IN_FRAME_ADDRESS: return EmitBuiltinReturnAddr(exp, Result,true);
case BUILT_IN_RETURN_ADDRESS: return EmitBuiltinReturnAddr(exp, Result,false);
case BUILT_IN_STACK_SAVE: return EmitBuiltinStackSave(exp, Result);
case BUILT_IN_STACK_RESTORE: return EmitBuiltinStackRestore(exp);
case BUILT_IN_EXTRACT_RETURN_ADDR:
return EmitBuiltinExtractReturnAddr(exp, Result);
case BUILT_IN_FROB_RETURN_ADDR:
return EmitBuiltinFrobReturnAddr(exp, Result);
case BUILT_IN_INIT_TRAMPOLINE:
return EmitBuiltinInitTrampoline(exp, Result);
case BUILT_IN_DWARF_CFA:
return EmitBuiltinDwarfCFA(exp, Result);
#ifdef DWARF2_UNWIND_INFO
case BUILT_IN_DWARF_SP_COLUMN:
return EmitBuiltinDwarfSPColumn(exp, Result);
case BUILT_IN_INIT_DWARF_REG_SIZES:
return EmitBuiltinInitDwarfRegSizes(exp, Result);
#endif
case BUILT_IN_EH_RETURN:
return EmitBuiltinEHReturn(exp, Result);
#ifdef EH_RETURN_DATA_REGNO
case BUILT_IN_EH_RETURN_DATA_REGNO:
return EmitBuiltinEHReturnDataRegno(exp, Result);
#endif
case BUILT_IN_UNWIND_INIT:
return EmitBuiltinUnwindInit(exp, Result);
case BUILT_IN_OBJECT_SIZE: {
tree arglist = TREE_OPERAND (exp, 1);
if (!validate_arglist(arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) {
error("Invalid builtin_object_size argument types");
return false;
}
tree ObjSizeTree = TREE_VALUE (TREE_CHAIN (arglist));
STRIP_NOPS (ObjSizeTree);
if (TREE_CODE (ObjSizeTree) != INTEGER_CST
|| tree_int_cst_sgn (ObjSizeTree) < 0
|| compare_tree_int (ObjSizeTree, 3) > 0) {
error("Invalid second builtin_object_size argument");
return false;
}
tree Object = TREE_VALUE(arglist);
tree ObjTy = TREE_VALUE(TREE_CHAIN(arglist));
Value *Tmp = Emit(ObjTy, 0);
ConstantInt *CI = cast<ConstantInt>(Tmp);
uint64_t val = (CI->getZExtValue() & 0x2) >> 1;
Value *NewTy = ConstantInt::get(Tmp->getType(), val);
Value* Args[] = {
Emit(Object, 0),
NewTy
};
const Type* Ty;
Ty = ConvertType(TREE_TYPE(exp));
Args[0] = Builder.CreateBitCast(Args[0], Type::getInt8PtrTy(Context));
Args[1] = Builder.CreateIntCast(Args[1], Type::getInt1Ty(Context),
false);
Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::objectsize,
&Ty,
1),
Args, Args + 2);
return true;
}
case BUILT_IN_CLZ: case BUILT_IN_CLZL:
case BUILT_IN_CLZLL: {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctlz);
const Type *DestTy = ConvertType(TREE_TYPE(exp));
Result = Builder.CreateIntCast(Result, DestTy, !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
return true;
}
case BUILT_IN_CTZ: case BUILT_IN_CTZL:
case BUILT_IN_CTZLL: {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::cttz);
const Type *DestTy = ConvertType(TREE_TYPE(exp));
Result = Builder.CreateIntCast(Result, DestTy, !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
return true;
}
case BUILT_IN_PARITYLL:
case BUILT_IN_PARITYL:
case BUILT_IN_PARITY: {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
Result = Builder.CreateBinOp(Instruction::And, Result,
ConstantInt::get(Result->getType(), 1));
return true;
}
case BUILT_IN_POPCOUNT: case BUILT_IN_POPCOUNTL:
case BUILT_IN_POPCOUNTLL: {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::ctpop);
const Type *DestTy = ConvertType(TREE_TYPE(exp));
Result = Builder.CreateIntCast(Result, DestTy, !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
return true;
}
case BUILT_IN_BSWAP32:
case BUILT_IN_BSWAP64: {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::bswap);
const Type *DestTy = ConvertType(TREE_TYPE(exp));
Result = Builder.CreateIntCast(Result, DestTy, !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
return true;
}
case BUILT_IN_SQRT:
case BUILT_IN_SQRTF:
case BUILT_IN_SQRTL:
break;
case BUILT_IN_POWI:
case BUILT_IN_POWIF:
case BUILT_IN_POWIL:
Result = EmitBuiltinPOWI(exp);
return true;
case BUILT_IN_POW:
case BUILT_IN_POWF:
case BUILT_IN_POWL:
if (!flag_errno_math) {
Result = EmitBuiltinPOW(exp);
return true;
}
break;
case BUILT_IN_LOG:
case BUILT_IN_LOGF:
case BUILT_IN_LOGL:
if (!flag_errno_math) {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log);
Result = CastToFPType(Result, ConvertType(TREE_TYPE(exp)));
return true;
}
break;
case BUILT_IN_LOG2:
case BUILT_IN_LOG2F:
case BUILT_IN_LOG2L:
if (!flag_errno_math) {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log2);
Result = CastToFPType(Result, ConvertType(TREE_TYPE(exp)));
return true;
}
break;
case BUILT_IN_LOG10:
case BUILT_IN_LOG10F:
case BUILT_IN_LOG10L:
if (!flag_errno_math) {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::log10);
Result = CastToFPType(Result, ConvertType(TREE_TYPE(exp)));
return true;
}
break;
case BUILT_IN_EXP:
case BUILT_IN_EXPF:
case BUILT_IN_EXPL:
if (!flag_errno_math) {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::exp);
Result = CastToFPType(Result, ConvertType(TREE_TYPE(exp)));
return true;
}
break;
case BUILT_IN_EXP2:
case BUILT_IN_EXP2F:
case BUILT_IN_EXP2L:
if (!flag_errno_math) {
Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::exp2);
Result = CastToFPType(Result, ConvertType(TREE_TYPE(exp)));
return true;
}
break;
case BUILT_IN_FFS: case BUILT_IN_FFSL:
case BUILT_IN_FFSLL: { Value *Amt = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
EmitBuiltinUnaryOp(Amt, Result, Intrinsic::cttz);
Result = Builder.CreateAdd(Result,
ConstantInt::get(Result->getType(), 1));
Result = CastToUIntType(Result, ConvertType(TREE_TYPE(exp)));
Value *Cond =
Builder.CreateICmpEQ(Amt,
Constant::getNullValue(Amt->getType()));
Result = Builder.CreateSelect(Cond,
Constant::getNullValue(Result->getType()),
Result);
return true;
}
case BUILT_IN_FLT_ROUNDS: {
Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::flt_rounds));
Result = BitCastToType(Result, ConvertType(TREE_TYPE(exp)));
return true;
}
case BUILT_IN_TRAP:
Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::trap));
Builder.CreateUnreachable();
EmitBlock(BasicBlock::Create(Context, ""));
return true;
case BUILT_IN_ANNOTATION: {
location_t locus = EXPR_LOCATION (exp);
Constant *lineNo = ConstantInt::get(Type::getInt32Ty(Context), locus.line);
Constant *file = ConvertMetadataStringToGV(locus.file);
const Type *SBP= Type::getInt8PtrTy(Context);
file = Builder.getFolder().CreateBitCast(file, SBP);
tree arglist = TREE_OPERAND(exp, 1);
Value *ExprVal = Emit(TREE_VALUE(arglist), 0);
const Type *Ty = ExprVal->getType();
Value *StrVal = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
SmallVector<Value *, 4> Args;
Args.push_back(ExprVal);
Args.push_back(StrVal);
Args.push_back(file);
Args.push_back(lineNo);
assert(Ty && "llvm.annotation arg type may not be null");
Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::annotation,
&Ty,
1),
Args.begin(), Args.end());
return true;
}
case BUILT_IN_SYNCHRONIZE: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Value* C[5];
C[0] = C[1] = C[2] = C[3] = ConstantInt::get(Type::getInt1Ty(Context), 1);
C[4] = ConstantInt::get(Type::getInt1Ty(Context), 0);
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::memory_barrier),
C, C + 5);
return true;
}
#if defined(TARGET_ALPHA) || defined(TARGET_386) || defined(TARGET_POWERPC) \
|| defined(TARGET_ARM)
case BUILT_IN_BOOL_COMPARE_AND_SWAP_1: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildCmpAndSwapAtomicBuiltin(exp, unsigned_char_type_node, true);
return true;
}
case BUILT_IN_BOOL_COMPARE_AND_SWAP_2: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildCmpAndSwapAtomicBuiltin(exp, short_unsigned_type_node, true);
return true;
}
case BUILT_IN_BOOL_COMPARE_AND_SWAP_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildCmpAndSwapAtomicBuiltin(exp, unsigned_type_node, true);
return true;
}
case BUILT_IN_BOOL_COMPARE_AND_SWAP_8: {
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
Result = BuildCmpAndSwapAtomicBuiltin(exp, long_long_unsigned_type_node,
true);
return true;
}
case BUILT_IN_VAL_COMPARE_AND_SWAP_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_VAL_COMPARE_AND_SWAP_1:
case BUILT_IN_VAL_COMPARE_AND_SWAP_2:
case BUILT_IN_VAL_COMPARE_AND_SWAP_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
tree type = TREE_TYPE(exp);
Result = BuildCmpAndSwapAtomicBuiltin(exp, type, false);
return true;
}
case BUILT_IN_FETCH_AND_ADD_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_FETCH_AND_ADD_1:
case BUILT_IN_FETCH_AND_ADD_2:
case BUILT_IN_FETCH_AND_ADD_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildBinaryAtomicBuiltin(exp, Intrinsic::atomic_load_add);
return true;
}
case BUILT_IN_FETCH_AND_SUB_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_FETCH_AND_SUB_1:
case BUILT_IN_FETCH_AND_SUB_2:
case BUILT_IN_FETCH_AND_SUB_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildBinaryAtomicBuiltin(exp, Intrinsic::atomic_load_sub);
return true;
}
case BUILT_IN_FETCH_AND_OR_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_FETCH_AND_OR_1:
case BUILT_IN_FETCH_AND_OR_2:
case BUILT_IN_FETCH_AND_OR_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildBinaryAtomicBuiltin(exp, Intrinsic::atomic_load_or);
return true;
}
case BUILT_IN_FETCH_AND_AND_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_FETCH_AND_AND_1:
case BUILT_IN_FETCH_AND_AND_2:
case BUILT_IN_FETCH_AND_AND_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildBinaryAtomicBuiltin(exp, Intrinsic::atomic_load_and);
return true;
}
case BUILT_IN_FETCH_AND_XOR_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_FETCH_AND_XOR_1:
case BUILT_IN_FETCH_AND_XOR_2:
case BUILT_IN_FETCH_AND_XOR_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildBinaryAtomicBuiltin(exp, Intrinsic::atomic_load_xor);
return true;
}
case BUILT_IN_FETCH_AND_NAND_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_FETCH_AND_NAND_1:
case BUILT_IN_FETCH_AND_NAND_2:
case BUILT_IN_FETCH_AND_NAND_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildBinaryAtomicBuiltin(exp, Intrinsic::atomic_load_nand);
return true;
}
case BUILT_IN_LOCK_TEST_AND_SET_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_LOCK_TEST_AND_SET_1:
case BUILT_IN_LOCK_TEST_AND_SET_2:
case BUILT_IN_LOCK_TEST_AND_SET_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
Result = BuildBinaryAtomicBuiltin(exp, Intrinsic::atomic_swap);
return true;
}
case BUILT_IN_ADD_AND_FETCH_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_ADD_AND_FETCH_1:
case BUILT_IN_ADD_AND_FETCH_2:
case BUILT_IN_ADD_AND_FETCH_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
const Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
const Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::atomic_load_add,
Ty, 2),
C, C + 2);
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result = Builder.CreateAdd(Result, C[1]);
Result = Builder.CreateIntToPtr(Result, ResultTy);
return true;
}
case BUILT_IN_SUB_AND_FETCH_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_SUB_AND_FETCH_1:
case BUILT_IN_SUB_AND_FETCH_2:
case BUILT_IN_SUB_AND_FETCH_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
const Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
const Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::atomic_load_sub,
Ty, 2),
C, C + 2);
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result = Builder.CreateSub(Result, C[1]);
Result = Builder.CreateIntToPtr(Result, ResultTy);
return true;
}
case BUILT_IN_OR_AND_FETCH_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_OR_AND_FETCH_1:
case BUILT_IN_OR_AND_FETCH_2:
case BUILT_IN_OR_AND_FETCH_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
const Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
const Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::atomic_load_or,
Ty, 2),
C, C + 2);
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result = Builder.CreateOr(Result, C[1]);
Result = Builder.CreateIntToPtr(Result, ResultTy);
return true;
}
case BUILT_IN_AND_AND_FETCH_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_AND_AND_FETCH_1:
case BUILT_IN_AND_AND_FETCH_2:
case BUILT_IN_AND_AND_FETCH_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
const Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
const Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::atomic_load_and,
Ty, 2),
C, C + 2);
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result = Builder.CreateAnd(Result, C[1]);
Result = Builder.CreateIntToPtr(Result, ResultTy);
return true;
}
case BUILT_IN_XOR_AND_FETCH_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_XOR_AND_FETCH_1:
case BUILT_IN_XOR_AND_FETCH_2:
case BUILT_IN_XOR_AND_FETCH_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
const Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
const Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::atomic_load_xor,
Ty, 2),
C, C + 2);
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result = Builder.CreateXor(Result, C[1]);
Result = Builder.CreateIntToPtr(Result, ResultTy);
return true;
}
case BUILT_IN_NAND_AND_FETCH_8:
#if defined(TARGET_ARM)
return false;
#endif
#if defined(TARGET_POWERPC)
if (!TARGET_64BIT)
return false;
#endif
case BUILT_IN_NAND_AND_FETCH_1:
case BUILT_IN_NAND_AND_FETCH_2:
case BUILT_IN_NAND_AND_FETCH_4: {
#if defined(TARGET_ARM)
if (TARGET_THUMB1 || !arm_arch6)
return false;
#endif
const Type *ResultTy = ConvertType(TREE_TYPE(exp));
tree arglist = TREE_OPERAND(exp, 1);
Value* C[2] = {
Emit(TREE_VALUE(arglist), 0),
Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0)
};
const Type* Ty[2];
Ty[0] = ResultTy;
Ty[1] = ResultTy->getPointerTo();
C[0] = Builder.CreateBitCast(C[0], Ty[1]);
C[1] = Builder.CreateIntCast(C[1], Ty[0], !TYPE_UNSIGNED(TREE_TYPE(exp)),
"cast");
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result =
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::atomic_load_nand,
Ty, 2),
C, C + 2);
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
EmitMemoryBarrier(true, true, true, true, false);
#else
EmitMemoryBarrier(true, true, true, true, true);
#endif
Result = Builder.CreateAnd(Builder.CreateNot(Result), C[1]);
Result = Builder.CreateIntToPtr(Result, ResultTy);
return true;
}
case BUILT_IN_LOCK_RELEASE_1:
case BUILT_IN_LOCK_RELEASE_2:
case BUILT_IN_LOCK_RELEASE_4:
case BUILT_IN_LOCK_RELEASE_8:
case BUILT_IN_LOCK_RELEASE_16: {
tree type;
switch(DECL_FUNCTION_CODE(fndecl)) {
case BUILT_IN_LOCK_RELEASE_1:
type = unsigned_char_type_node; break;
case BUILT_IN_LOCK_RELEASE_2:
type = short_unsigned_type_node; break;
case BUILT_IN_LOCK_RELEASE_4:
type = unsigned_type_node; break;
case BUILT_IN_LOCK_RELEASE_8:
type = long_long_unsigned_type_node; break;
case BUILT_IN_LOCK_RELEASE_16: default:
abort();
}
tree arglist = TREE_OPERAND(exp, 1);
tree t1 = build1 (INDIRECT_REF, type, TREE_VALUE (arglist));
TREE_THIS_VOLATILE(t1) = 1;
tree t = build2 (MODIFY_EXPR, type, t1,
build_int_cst (type, (HOST_WIDE_INT)0));
EmitMODIFY_EXPR(t, 0);
Result = 0;
return true;
}
#endif //FIXME: these break the build for backends that haven't implemented them
#if 1 // FIXME: Should handle these GCC extensions eventually.
case BUILT_IN_LONGJMP: {
tree arglist = TREE_OPERAND(exp, 1);
if (validate_arglist(arglist, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE)) {
tree value = TREE_VALUE(TREE_CHAIN(arglist));
if (TREE_CODE(value) != INTEGER_CST ||
cast<ConstantInt>(Emit(value, 0))->getValue() != 1) {
error ("%<__builtin_longjmp%> second argument must be 1");
return false;
}
}
#if defined(TARGET_ARM) && defined(CONFIG_DARWIN_H)
Value *Buf = Emit(TREE_VALUE(arglist), 0);
Buf = Builder.CreateBitCast(Buf, Type::getInt8Ty(Context)->getPointerTo());
Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
Intrinsic::eh_sjlj_longjmp),
Buf);
Result = 0;
return true;
#endif
}
case BUILT_IN_APPLY_ARGS:
case BUILT_IN_APPLY:
case BUILT_IN_RETURN:
case BUILT_IN_SAVEREGS:
case BUILT_IN_ARGS_INFO:
case BUILT_IN_NEXT_ARG:
case BUILT_IN_CLASSIFY_TYPE:
case BUILT_IN_AGGREGATE_INCOMING_ADDRESS:
case BUILT_IN_SETJMP_SETUP:
case BUILT_IN_SETJMP_DISPATCHER:
case BUILT_IN_SETJMP_RECEIVER:
case BUILT_IN_UPDATE_SETJMP_BUF:
{
const Type *Ty = ConvertType(TREE_TYPE(exp));
if (Ty != Type::getVoidTy(Context))
Result = Constant::getNullValue(Ty);
return true;
}
#endif // FIXME: Should handle these GCC extensions eventually.
}
return false;
}
/// EmitBuiltinUnaryOp - Emit a call to the single-operand intrinsic \p Id,
/// overloaded on the operand's type, placing the call in Result.  Always
/// reports success.
bool TreeToLLVM::EmitBuiltinUnaryOp(Value *InVal, Value *&Result,
                                    Intrinsic::ID Id) {
  const Type *OpTy = InVal->getType();
  Function *IntFn = Intrinsic::getDeclaration(TheModule, Id, &OpTy, 1);
  Result = Builder.CreateCall(IntFn, InVal);
  return true;
}
/// EmitBuiltinSQRT - Lower a sqrt builtin into llvm.sqrt overloaded on the
/// operand's floating-point type.
Value *TreeToLLVM::EmitBuiltinSQRT(tree exp) {
  Value *Op = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
  const Type *OpTy = Op->getType();
  Function *SqrtFn =
    Intrinsic::getDeclaration(TheModule, Intrinsic::sqrt, &OpTy, 1);
  return Builder.CreateCall(SqrtFn, Op);
}
/// EmitBuiltinPOWI - Lower __builtin_powi* into llvm.powi, which takes the
/// base (overloading the intrinsic on its type) and an i32 exponent.
/// Returns null if the argument list does not validate.
Value *TreeToLLVM::EmitBuiltinPOWI(tree exp) {
  tree Args = TREE_OPERAND (exp, 1);
  if (!validate_arglist(Args, REAL_TYPE, INTEGER_TYPE, VOID_TYPE))
    return 0;
  Value *Base = Emit(TREE_VALUE(Args), 0);
  Value *Exponent = Emit(TREE_VALUE(TREE_CHAIN(Args)), 0);
  const Type *BaseTy = Base->getType();
  // llvm.powi always takes its exponent as a signed i32.
  Exponent = CastToSIntType(Exponent, Type::getInt32Ty(Context));
  Value *Ops[2] = { Base, Exponent };
  Function *PowiFn =
    Intrinsic::getDeclaration(TheModule, Intrinsic::powi, &BaseTy, 1);
  return Builder.CreateCall(PowiFn, Ops, Ops + 2);
}
/// EmitBuiltinPOW - Lower __builtin_pow* into llvm.pow overloaded on the
/// base's floating-point type.  Returns null if the argument list does not
/// validate.
Value *TreeToLLVM::EmitBuiltinPOW(tree exp) {
  tree Args = TREE_OPERAND (exp, 1);
  if (!validate_arglist(Args, REAL_TYPE, REAL_TYPE, VOID_TYPE))
    return 0;
  Value *Base = Emit(TREE_VALUE(Args), 0);
  Value *Exponent = Emit(TREE_VALUE(TREE_CHAIN(Args)), 0);
  const Type *BaseTy = Base->getType();
  Value *Ops[2] = { Base, Exponent };
  Function *PowFn =
    Intrinsic::getDeclaration(TheModule, Intrinsic::pow, &BaseTy, 1);
  return Builder.CreateCall(PowFn, Ops, Ops + 2);
}
/// EmitBuiltinConstantP - Lower __builtin_constant_p to zero: at this point
/// any argument that folded to a constant has already been simplified away.
bool TreeToLLVM::EmitBuiltinConstantP(tree exp, Value *&Result) {
  const Type *RetTy = ConvertType(TREE_TYPE(exp));
  Result = Constant::getNullValue(RetTy);
  return true;
}
/// EmitBuiltinExtendPointer - Convert the pointer-sized argument to the
/// builtin's result type, honoring the signedness of both the argument's
/// tree type and the result type.
bool TreeToLLVM::EmitBuiltinExtendPointer(tree exp, Value *&Result) {
  tree Args = TREE_OPERAND(exp, 1);
  tree ArgTree = TREE_VALUE(Args);
  Value *Arg = Emit(ArgTree, 0);
  bool ArgSigned = !TYPE_UNSIGNED(TREE_TYPE(ArgTree));
  bool RetSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
  Result = CastToAnyType(Arg, ArgSigned, ConvertType(TREE_TYPE(exp)),
                         RetSigned);
  return true;
}
/// OptimizeIntoPlainBuiltIn - Decide whether an object-size-checked builtin
/// (__builtin___memcpy_chk and friends) may be lowered as the plain builtin.
/// True when the size operand is the all-ones "unknown" marker or provably
/// covers the constant length; warns and returns false when the constant
/// length exceeds the constant destination size.
static bool OptimizeIntoPlainBuiltIn(tree exp, Value *Len, Value *Size) {
  // Look through a bitcast to find the underlying size constant, if any.
  if (BitCastInst *BC = dyn_cast<BitCastInst>(Size))
    Size = BC->getOperand(0);
  ConstantInt *SizeC = dyn_cast<ConstantInt>(Size);
  if (!SizeC)
    return false;
  // All-ones means "object size unknown": always safe to simplify.
  if (SizeC->isAllOnesValue())
    return true;
  // Likewise strip a bitcast off the length operand.
  if (BitCastInst *BC = dyn_cast<BitCastInst>(Len))
    Len = BC->getOperand(0);
  ConstantInt *LenC = dyn_cast<ConstantInt>(Len);
  if (!LenC)
    return false;
  // Constant length larger than the destination: certain overflow.
  if (SizeC->getValue().ult(LenC->getValue())) {
    location_t locus = EXPR_LOCATION(exp);
    warning (0, "%Hcall to %D will always overflow destination buffer",
             &locus, get_callee_fndecl(exp));
    return false;
  }
  return true;
}
/// EmitBuiltinMemCopy - Lower __builtin_memcpy/memmove (or their object-size
/// checked __memcpy_chk/__memmove_chk variants when SizeCheck is set) into an
/// llvm.memcpy/llvm.memmove.  Returns false to fall back to a library call
/// when validation fails or the checked form cannot be proven safe.
bool TreeToLLVM::EmitBuiltinMemCopy(tree exp, Value *&Result, bool isMemMove,
                                    bool SizeCheck) {
  tree arglist = TREE_OPERAND(exp, 1);
  if (SizeCheck) {
    // Checked form: (dst, src, len, objsize).
    if (!validate_arglist(arglist, POINTER_TYPE, POINTER_TYPE,
                          INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
      return false;
  } else {
    // Plain form: (dst, src, len).
    if (!validate_arglist(arglist, POINTER_TYPE, POINTER_TYPE,
                          INTEGER_TYPE, VOID_TYPE))
      return false;
  }
  tree Dst = TREE_VALUE(arglist);
  tree Src = TREE_VALUE(TREE_CHAIN(arglist));
  unsigned SrcAlign = getPointerAlignment(Src);
  unsigned DstAlign = getPointerAlignment(Dst);
  Value *DstV = Emit(Dst, 0);
  Value *SrcV = Emit(Src, 0);
  Value *Len = Emit(TREE_VALUE(TREE_CHAIN(TREE_CHAIN(arglist))), 0);
  if (SizeCheck) {
    // Only drop the object-size check when it is provably redundant.
    tree SizeArg = TREE_VALUE(TREE_CHAIN(TREE_CHAIN(TREE_CHAIN(arglist))));
    Value *Size = Emit(SizeArg, 0);
    if (!OptimizeIntoPlainBuiltIn(exp, Len, Size))
      return false;
  }
  // The intrinsic's alignment is the smaller of the two pointer alignments.
  Result = isMemMove ?
    EmitMemMove(DstV, SrcV, Len, std::min(SrcAlign, DstAlign)) :
    EmitMemCpy(DstV, SrcV, Len, std::min(SrcAlign, DstAlign));
  return true;
}
/// EmitBuiltinMemSet - Lower __builtin_memset (or the object-size checked
/// __memset_chk when SizeCheck is set) into an llvm.memset.  Returns false to
/// fall back to a library call when validation fails or the checked form
/// cannot be proven safe.
bool TreeToLLVM::EmitBuiltinMemSet(tree exp, Value *&Result, bool SizeCheck) {
  tree arglist = TREE_OPERAND(exp, 1);
  if (SizeCheck) {
    // Checked form: (dst, val, len, objsize).
    if (!validate_arglist(arglist, POINTER_TYPE, INTEGER_TYPE,
                          INTEGER_TYPE, INTEGER_TYPE, VOID_TYPE))
      return false;
  } else {
    // Plain form: (dst, val, len).
    if (!validate_arglist(arglist, POINTER_TYPE, INTEGER_TYPE,
                          INTEGER_TYPE, VOID_TYPE))
      return false;
  }
  tree Dst = TREE_VALUE(arglist);
  unsigned DstAlign = getPointerAlignment(Dst);
  Value *DstV = Emit(Dst, 0);
  Value *Val = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
  Value *Len = Emit(TREE_VALUE(TREE_CHAIN(TREE_CHAIN(arglist))), 0);
  if (SizeCheck) {
    // Only drop the object-size check when it is provably redundant.
    tree SizeArg = TREE_VALUE(TREE_CHAIN(TREE_CHAIN(TREE_CHAIN(arglist))));
    Value *Size = Emit(SizeArg, 0);
    if (!OptimizeIntoPlainBuiltIn(exp, Len, Size))
      return false;
  }
  Result = EmitMemSet(DstV, Val, Len, DstAlign);
  return true;
}
/// EmitBuiltinBZero - Lower __builtin_bzero(dst, len) as a memset of zero.
/// bzero has no return value, so Result is left untouched.
bool TreeToLLVM::EmitBuiltinBZero(tree exp,
                                  Value *&Result ATTRIBUTE_UNUSED) {
  tree Args = TREE_OPERAND(exp, 1);
  if (!validate_arglist(Args, POINTER_TYPE, INTEGER_TYPE, VOID_TYPE))
    return false;
  tree DstTree = TREE_VALUE(Args);
  unsigned Align = getPointerAlignment(DstTree);
  Value *Dst = Emit(DstTree, 0);
  Value *Zero = Constant::getNullValue(Type::getInt32Ty(Context));
  Value *Len = Emit(TREE_VALUE(TREE_CHAIN(Args)), 0);
  EmitMemSet(Dst, Zero, Len, Align);
  return true;
}
/// EmitBuiltinPrefetch - Lower __builtin_prefetch(addr[, rw[, locality]])
/// into llvm.prefetch.  Out-of-range or non-constant hint arguments are
/// diagnosed and replaced by the defaults (rw = 0, locality = 3) rather than
/// rejecting the call.
bool TreeToLLVM::EmitBuiltinPrefetch(tree exp) {
  tree arglist = TREE_OPERAND(exp, 1);
  if (!validate_arglist(arglist, POINTER_TYPE, 0))
    return false;
  Value *Ptr = Emit(TREE_VALUE(arglist), 0);
  Value *ReadWrite = 0;
  Value *Locality = 0;
  if (TREE_CHAIN(arglist)) {
    // Optional second argument: 0 = prefetch for read, 1 = for write.
    ReadWrite = Emit(TREE_VALUE(TREE_CHAIN(arglist)), 0);
    if (!isa<ConstantInt>(ReadWrite)) {
      error("second argument to %<__builtin_prefetch%> must be a constant");
      ReadWrite = 0;
    } else if (cast<ConstantInt>(ReadWrite)->getZExtValue() > 1) {
      warning (0, "invalid second argument to %<__builtin_prefetch%>;"
               " using zero");
      ReadWrite = 0;
    } else {
      // Valid constant: normalize to the i32 the intrinsic expects.
      ReadWrite = Builder.getFolder().CreateIntCast(cast<Constant>(ReadWrite),
                                                    Type::getInt32Ty(Context),
                                                    false);
    }
    if (TREE_CHAIN(TREE_CHAIN(arglist))) {
      // Optional third argument: temporal locality hint, 0..3.
      Locality = Emit(TREE_VALUE(TREE_CHAIN(TREE_CHAIN(arglist))), 0);
      if (!isa<ConstantInt>(Locality)) {
        error("third argument to %<__builtin_prefetch%> must be a constant");
        Locality = 0;
      } else if (cast<ConstantInt>(Locality)->getZExtValue() > 3) {
        warning(0, "invalid third argument to %<__builtin_prefetch%>; using 3");
        Locality = 0;
      } else {
        Locality = Builder.getFolder().CreateIntCast(cast<Constant>(Locality),
                                                     Type::getInt32Ty(Context),
                                                     false);
      }
    }
  }
  // Fill in the defaults for omitted or rejected hint arguments.
  if (ReadWrite == 0)
    ReadWrite = Constant::getNullValue(Type::getInt32Ty(Context));
  if (Locality == 0)
    Locality = ConstantInt::get(Type::getInt32Ty(Context), 3);
  Ptr = BitCastToType(Ptr, Type::getInt8PtrTy(Context));
  Value *Ops[3] = { Ptr, ReadWrite, Locality };
  Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::prefetch),
                     Ops, Ops+3);
  return true;
}
/// EmitBuiltinReturnAddr - Lower __builtin_return_address or, when isFrame is
/// set, __builtin_frame_address.  The frame level must fold to a constant
/// integer; otherwise an error is emitted and false returned.
bool TreeToLLVM::EmitBuiltinReturnAddr(tree exp, Value *&Result, bool isFrame) {
  tree Args = TREE_OPERAND(exp, 1);
  if (!validate_arglist(Args, INTEGER_TYPE, VOID_TYPE))
    return false;
  Value *Depth = Emit(TREE_VALUE(Args), 0);
  ConstantInt *Level = dyn_cast<ConstantInt>(Depth);
  if (!Level) {
    // Both intrinsics require a compile-time-constant level.
    if (isFrame)
      error("invalid argument to %<__builtin_frame_address%>");
    else
      error("invalid argument to %<__builtin_return_address%>");
    return false;
  }
  Intrinsic::ID IID = isFrame ? Intrinsic::frameaddress
                              : Intrinsic::returnaddress;
  Result = Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID), Level);
  Result = BitCastToType(Result, ConvertType(TREE_TYPE(exp)));
  return true;
}
/// EmitBuiltinExtractReturnAddr - Lower __builtin_extract_return_addr.  Here
/// this is just the argument viewed as an i8*; no target adjustment is made.
bool TreeToLLVM::EmitBuiltinExtractReturnAddr(tree exp, Value *&Result) {
  tree Args = TREE_OPERAND(exp, 1);
  Value *Addr = Emit(TREE_VALUE(Args), 0);
  Result = BitCastToType(Addr, Type::getInt8PtrTy(Context));
  return true;
}
/// EmitBuiltinFrobReturnAddr - Lower __builtin_frob_return_addr.  Here this
/// is just the argument viewed as an i8*; no target adjustment is made.
bool TreeToLLVM::EmitBuiltinFrobReturnAddr(tree exp, Value *&Result) {
  tree Args = TREE_OPERAND(exp, 1);
  Value *Addr = Emit(TREE_VALUE(Args), 0);
  Result = BitCastToType(Addr, Type::getInt8PtrTy(Context));
  return true;
}
/// EmitBuiltinStackSave - Lower __builtin_stack_save into llvm.stacksave.
bool TreeToLLVM::EmitBuiltinStackSave(tree exp, Value *&Result) {
  if (!validate_arglist(TREE_OPERAND(exp, 1), VOID_TYPE))
    return false;
  Function *Intr = Intrinsic::getDeclaration(TheModule, Intrinsic::stacksave);
  Result = Builder.CreateCall(Intr);
  return true;
}
// Fallback definitions for target macros that some backends leave undefined,
// used by the DWARF/EH builtin lowering below.
#ifndef ARG_POINTER_CFA_OFFSET
#define ARG_POINTER_CFA_OFFSET(FNDECL) FIRST_PARM_OFFSET (FNDECL)
#endif
#ifndef DWARF_FRAME_REGNUM
#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG)
#endif
#ifndef DWARF2_FRAME_REG_OUT
#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) (REGNO)
#endif
#ifndef HARD_REGNO_CALL_PART_CLOBBERED
#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) 0
#endif
/// EmitBuiltinDwarfCFA - Lower __builtin_dwarf_cfa into llvm.eh.dwarf.cfa,
/// passing the target's argument-pointer-to-CFA offset.
bool TreeToLLVM::EmitBuiltinDwarfCFA(tree exp, Value *&Result) {
  if (!validate_arglist(TREE_OPERAND(exp, 1), VOID_TYPE))
    return false;
  int cfa_offset = ARG_POINTER_CFA_OFFSET(exp);
  Value *Offset = ConstantInt::get(Type::getInt32Ty(Context), cfa_offset);
  Function *Intr = Intrinsic::getDeclaration(TheModule,
                                             Intrinsic::eh_dwarf_cfa);
  Result = Builder.CreateCall(Intr, Offset);
  return true;
}
/// EmitBuiltinDwarfSPColumn - Lower __builtin_dwarf_sp_column to a constant:
/// the DWARF column number of the stack pointer register.
bool TreeToLLVM::EmitBuiltinDwarfSPColumn(tree exp, Value *&Result) {
  if (!validate_arglist(TREE_OPERAND(exp, 1), VOID_TYPE))
    return false;
  unsigned RegNo = DWARF_FRAME_REGNUM(STACK_POINTER_REGNUM);
  Result = ConstantInt::get(ConvertType(TREE_TYPE(exp)), RegNo);
  return true;
}
/// EmitBuiltinEHReturnDataRegno - Lower __builtin_eh_return_data_regno to a
/// constant: the DWARF column of the requested EH data register.  On targets
/// without EH_RETURN_DATA_REGNO the builtin is simply dropped (returns true
/// without setting Result).
bool TreeToLLVM::EmitBuiltinEHReturnDataRegno(tree exp, Value *&Result) {
#ifdef EH_RETURN_DATA_REGNO
  tree arglist = TREE_OPERAND(exp, 1);
  if (!validate_arglist(arglist, INTEGER_TYPE, VOID_TYPE))
    return false;
  tree which = TREE_VALUE (arglist);
  unsigned HOST_WIDE_INT iwhich;
  if (TREE_CODE (which) != INTEGER_CST) {
    error ("argument of %<__builtin_eh_return_regno%> must be constant");
    return false;
  }
  iwhich = tree_low_cst (which, 1);
  iwhich = EH_RETURN_DATA_REGNO (iwhich);  // data-register index -> hard reg
  if (iwhich == INVALID_REGNUM)
    return false;
  iwhich = DWARF_FRAME_REGNUM (iwhich);    // hard reg -> DWARF column
  Result = ConstantInt::get(ConvertType(TREE_TYPE(exp)), iwhich);
#endif
  return true;
}
/// EmitBuiltinEHReturn - Lower __builtin_eh_return(offset, handler) into the
/// llvm.eh.return.i32/i64 intrinsic matching the target's pointer width.
/// Code after this builtin never runs, so the block is terminated with an
/// unreachable and a fresh block is started.
bool TreeToLLVM::EmitBuiltinEHReturn(tree exp, Value *&Result) {
  tree Args = TREE_OPERAND(exp, 1);
  if (!validate_arglist(Args, INTEGER_TYPE, POINTER_TYPE, VOID_TYPE))
    return false;
  const Type *IntPtr = TD.getIntPtrType(Context);
  Value *Offset = Emit(TREE_VALUE(Args), 0);
  Value *Handler = Emit(TREE_VALUE(TREE_CHAIN(Args)), 0);
  Intrinsic::ID IID = IntPtr->isIntegerTy(32) ? Intrinsic::eh_return_i32
                                              : Intrinsic::eh_return_i64;
  Offset = Builder.CreateIntCast(Offset, IntPtr, /*isSigned=*/true);
  Handler = BitCastToType(Handler, Type::getInt8PtrTy(Context));
  Value *Ops[2] = { Offset, Handler };
  Builder.CreateCall(Intrinsic::getDeclaration(TheModule, IID), Ops, Ops + 2);
  Result = Builder.CreateUnreachable();
  EmitBlock(BasicBlock::Create(Context, ""));
  return true;
}
/// EmitBuiltinInitDwarfRegSizes - Lower __builtin_init_dwarf_reg_size_table:
/// emit stores that fill the i8 table pointed to by the argument with the
/// byte size of each register, indexed by DWARF column.  A no-op unless the
/// target supports DWARF2 unwind info.
bool TreeToLLVM::EmitBuiltinInitDwarfRegSizes(tree exp,
                                              Value *&Result ATTRIBUTE_UNUSED) {
#ifdef DWARF2_UNWIND_INFO
  unsigned int i;
  bool wrote_return_column = false;
  static bool reg_modes_initialized = false;
  tree arglist = TREE_OPERAND(exp, 1);
  if (!validate_arglist(arglist, POINTER_TYPE, VOID_TYPE))
    return false;
  // reg_raw_mode needs one-time initialization before it can be read.
  if (!reg_modes_initialized) {
    init_reg_modes_once();
    reg_modes_initialized = true;
  }
  // The table base pointer, as an i8*.
  Value *Addr = BitCastToType(Emit(TREE_VALUE(arglist), 0),
                              Type::getInt8PtrTy(Context));
  Constant *Size, *Idx;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) {
    // DWARF column of hard register i, as emitted for EH.
    int rnum = DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), 1);
    if (rnum < DWARF_FRAME_REGISTERS) {
      enum machine_mode save_mode = reg_raw_mode[i];
      HOST_WIDE_INT size;
      if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
        save_mode = choose_hard_reg_mode (i, 1, true);
      if (DWARF_FRAME_REGNUM (i) == DWARF_FRAME_RETURN_COLUMN) {
        if (save_mode == VOIDmode)
          continue;
        wrote_return_column = true;
      }
      size = GET_MODE_SIZE (save_mode);
      // Negative columns have no table slot.
      if (rnum < 0)
        continue;
      // table[rnum] = size of register i in bytes.
      Size = ConstantInt::get(Type::getInt8Ty(Context), size);
      Idx = ConstantInt::get(Type::getInt32Ty(Context), rnum);
      Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
    }
  }
  // Make sure the return-address column always gets an entry.
  if (!wrote_return_column) {
    Size = ConstantInt::get(Type::getInt8Ty(Context), GET_MODE_SIZE (Pmode));
    Idx = ConstantInt::get(Type::getInt32Ty(Context), DWARF_FRAME_RETURN_COLUMN);
    Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
  }
#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  // Some targets use an alternate return column as well.
  Size = ConstantInt::get(Type::getInt8Ty(Context), GET_MODE_SIZE (Pmode));
  Idx = ConstantInt::get(Type::getInt32Ty(Context), DWARF_ALT_FRAME_RETURN_COLUMN);
  Builder.CreateStore(Size, Builder.CreateGEP(Addr, Idx), false);
#endif
#endif
  return true;
}
/// EmitBuiltinUnwindInit - Lower __builtin_unwind_init into
/// llvm.eh.unwind.init.
bool TreeToLLVM::EmitBuiltinUnwindInit(tree exp, Value *&Result) {
  if (!validate_arglist(TREE_OPERAND(exp, 1), VOID_TYPE))
    return false;
  Function *Intr = Intrinsic::getDeclaration(TheModule,
                                             Intrinsic::eh_unwind_init);
  Result = Builder.CreateCall(Intr);
  return true;
}
/// EmitBuiltinStackRestore - Lower __builtin_stack_restore into
/// llvm.stackrestore, which takes the saved stack pointer as an i8*.
bool TreeToLLVM::EmitBuiltinStackRestore(tree exp) {
  tree Args = TREE_OPERAND(exp, 1);
  if (!validate_arglist(Args, POINTER_TYPE, VOID_TYPE))
    return false;
  Value *SavedSP = Emit(TREE_VALUE(Args), 0);
  SavedSP = BitCastToType(SavedSP, Type::getInt8PtrTy(Context));
  Builder.CreateCall(Intrinsic::getDeclaration(TheModule,
                                               Intrinsic::stackrestore),
                     SavedSP);
  return true;
}
/// EmitBuiltinAlloca - Lower __builtin_alloca(size) into a dynamic
/// "alloca i8, size" instruction.
bool TreeToLLVM::EmitBuiltinAlloca(tree exp, Value *&Result) {
  tree Args = TREE_OPERAND(exp, 1);
  if (!validate_arglist(Args, INTEGER_TYPE, VOID_TYPE))
    return false;
  Value *Size = Emit(TREE_VALUE(Args), 0);
  Size = CastToSIntType(Size, Type::getInt32Ty(Context));
  Result = Builder.CreateAlloca(Type::getInt8Ty(Context), Size);
  return true;
}
/// EmitBuiltinExpect - Lower __builtin_expect(value, expected) by dropping
/// the hint and emitting just the value.  With fewer than two arguments the
/// call is ignored entirely.
bool TreeToLLVM::EmitBuiltinExpect(tree exp, const MemRef *DestLoc,
                                   Value *&Result) {
  tree Args = TREE_OPERAND(exp, 1);
  if (Args == NULL_TREE || TREE_CHAIN(Args) == NULL_TREE)
    return true;
  Result = Emit(TREE_VALUE(Args), DestLoc);
  return true;
}
/// EmitBuiltinVAStart - Lower __builtin_va_start into llvm.va_start.
/// Returns true even on user error: the diagnostic has already been issued
/// and no fallback call is wanted.
bool TreeToLLVM::EmitBuiltinVAStart(tree exp) {
  tree arglist = TREE_OPERAND(exp, 1);
  tree fntype = TREE_TYPE(current_function_decl);
  // va_start is only meaningful in a varargs function.
  // Use %<...%> quoting for consistency with the other diagnostics in this
  // file (the old `...' form is the deprecated GCC quoting style).
  if (TYPE_ARG_TYPES(fntype) == 0 ||
      (TREE_VALUE(tree_last(TYPE_ARG_TYPES(fntype))) == void_type_node)) {
    error("%<va_start%> used in function with fixed args");
    return true;
  }
  // Let GCC check the second (last-named-parameter) operand; it issues its
  // own diagnostics when the check fails.
  tree chain = TREE_CHAIN(arglist);
  if (fold_builtin_next_arg (chain))
    return true;
  Value *ArgVal = Emit(TREE_VALUE(arglist), 0);
  Constant *llvm_va_start_fn = Intrinsic::getDeclaration(TheModule,
                                                         Intrinsic::vastart);
  // llvm.va_start takes an i8* pointing at the va_list object.
  ArgVal = BitCastToType(ArgVal, Type::getInt8PtrTy(Context));
  Builder.CreateCall(llvm_va_start_fn, ArgVal);
  return true;
}
/// EmitBuiltinVAEnd - Lower __builtin_va_end into llvm.va_end, which takes
/// an i8* pointing at the va_list object.
bool TreeToLLVM::EmitBuiltinVAEnd(tree exp) {
  Value *VAList = Emit(TREE_VALUE(TREE_OPERAND(exp, 1)), 0);
  VAList = BitCastToType(VAList, Type::getInt8PtrTy(Context));
  Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vaend),
                     VAList);
  return true;
}
/// EmitBuiltinVACopy - Lower __builtin_va_copy into llvm.va_copy, which takes
/// the destination and source va_lists as i8* addresses.
bool TreeToLLVM::EmitBuiltinVACopy(tree exp) {
  tree Arg1T = TREE_VALUE(TREE_OPERAND(exp, 1));
  tree Arg2T = TREE_VALUE(TREE_CHAIN(TREE_OPERAND(exp, 1)));
  Value *Arg1 = Emit(Arg1T, 0);
  Value *Arg2;
  if (!isAggregateTreeType(va_list_type_node)) {
    // Scalar va_list: Emit produced the source by value, so spill it to a
    // temporary to obtain an address llvm.va_copy can take.
    Value *V2 = Emit(Arg2T, 0);
    Arg2 = CreateTemporary(V2->getType());
    Builder.CreateStore(V2, Arg2);
  } else {
    // Aggregate va_list: Emit already yields its address.
    Arg2 = Emit(Arg2T, 0);
  }
  static const Type *VPTy = Type::getInt8PtrTy(Context);
  SmallVector<Value *, 2> Args;
  Args.push_back(BitCastToType(Arg1, VPTy));
  Args.push_back(BitCastToType(Arg2, VPTy));
  Builder.CreateCall(Intrinsic::getDeclaration(TheModule, Intrinsic::vacopy),
                     Args.begin(), Args.end());
  return true;
}
/// EmitBuiltinInitTrampoline - Lower __builtin_init_trampoline(tramp, func,
/// chain) into llvm.init.trampoline with all three pointers as i8*.
bool TreeToLLVM::EmitBuiltinInitTrampoline(tree exp, Value *&Result) {
  tree Args = TREE_OPERAND(exp, 1);
  if (!validate_arglist (Args, POINTER_TYPE, POINTER_TYPE, POINTER_TYPE,
                         VOID_TYPE))
    return false;
  static const Type *VPTy = Type::getInt8PtrTy(Context);
  Value *Tramp = BitCastToType(Emit(TREE_VALUE(Args), 0), VPTy);
  Value *Func = BitCastToType(Emit(TREE_VALUE(TREE_CHAIN(Args)), 0), VPTy);
  Value *Chain =
    BitCastToType(Emit(TREE_VALUE(TREE_CHAIN(TREE_CHAIN(Args))), 0), VPTy);
  Value *Ops[3] = { Tramp, Func, Chain };
  Function *Intr = Intrinsic::getDeclaration(TheModule,
                                             Intrinsic::init_trampoline);
  Result = Builder.CreateCall(Intr, Ops, Ops+3, "tramp");
  return true;
}
/// EmitLoadFromComplex - Load the real and imaginary parts of the complex
/// value stored at SrcComplex (a two-field struct in memory).
void TreeToLLVM::EmitLoadFromComplex(Value *&Real, Value *&Imag,
                                     MemRef SrcComplex) {
  Value *RealPtr = Builder.CreateStructGEP(SrcComplex.Ptr, 0, "real");
  Real = Builder.CreateLoad(RealPtr, SrcComplex.Volatile, "real");
  cast<LoadInst>(Real)->setAlignment(SrcComplex.getAlignment());
  Value *ImagPtr = Builder.CreateStructGEP(SrcComplex.Ptr, 1, "imag");
  Imag = Builder.CreateLoad(ImagPtr, SrcComplex.Volatile, "imag");
  // The imaginary part sits one element past the start, so its alignment is
  // limited by the element size as well as the aggregate's alignment.
  cast<LoadInst>(Imag)->setAlignment(
    MinAlign(SrcComplex.getAlignment(), TD.getTypeAllocSize(Real->getType()))
  );
}
/// EmitStoreToComplex - Store the given real and imaginary parts into the
/// complex value at DestComplex (a two-field struct in memory).
void TreeToLLVM::EmitStoreToComplex(MemRef DestComplex, Value *Real,
                                    Value *Imag) {
  StoreInst *St;
  Value *RealPtr = Builder.CreateStructGEP(DestComplex.Ptr, 0, "real");
  St = Builder.CreateStore(Real, RealPtr, DestComplex.Volatile);
  St->setAlignment(DestComplex.getAlignment());
  Value *ImagPtr = Builder.CreateStructGEP(DestComplex.Ptr, 1, "imag");
  St = Builder.CreateStore(Imag, ImagPtr, DestComplex.Volatile);
  // The imaginary part is offset by one element, so its alignment is limited
  // by the element size as well as the aggregate's alignment.
  St->setAlignment(
    MinAlign(DestComplex.getAlignment(), TD.getTypeAllocSize(Real->getType()))
  );
}
/// EmitCOMPLEX_EXPR - Build a complex value from the expression's two scalar
/// operands and store it to the destination location.
void TreeToLLVM::EmitCOMPLEX_EXPR(tree exp, const MemRef *DestLoc) {
  Value *RealPart = Emit(TREE_OPERAND(exp, 0), 0);
  Value *ImagPart = Emit(TREE_OPERAND(exp, 1), 0);
  EmitStoreToComplex(*DestLoc, RealPart, ImagPart);
}
/// EmitCOMPLEX_CST - Store a complex constant's real and imaginary parts to
/// the destination location.
void TreeToLLVM::EmitCOMPLEX_CST(tree exp, const MemRef *DestLoc) {
  Value *RealPart = Emit(TREE_REALPART(exp), 0);
  Value *ImagPart = Emit(TREE_IMAGPART(exp), 0);
  EmitStoreToComplex(*DestLoc, RealPart, ImagPart);
}
/// EmitComplexBinOp - Emit a binary operation (+, -, *, /, ==, !=) on complex
/// operands.  Both operands are evaluated into stack temporaries, loaded back
/// as (real, imag) scalar pairs, and combined.  Arithmetic results are stored
/// to *DestLoc and 0 is returned; comparisons return the boolean directly.
Value *TreeToLLVM::EmitComplexBinOp(tree exp, const MemRef *DestLoc) {
  const Type *ComplexTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));

  // Evaluate each operand into its own stack temporary.
  MemRef LHSTmp = CreateTempLoc(ComplexTy);
  MemRef RHSTmp = CreateTempLoc(ComplexTy);
  Emit(TREE_OPERAND(exp, 0), &LHSTmp);
  Emit(TREE_OPERAND(exp, 1), &RHSTmp);

  // Load both operands back as scalar (real, imag) pairs.
  Value *LHSr, *LHSi;
  EmitLoadFromComplex(LHSr, LHSi, LHSTmp);
  Value *RHSr, *RHSi;
  EmitLoadFromComplex(RHSr, RHSi, RHSTmp);

  Value *DSTr, *DSTi;
  switch (TREE_CODE(exp)) {
  default: TODO(exp);
  case PLUS_EXPR:   // (a+ib) + (c+id) = (a+c) + i(b+d)
    if (LHSr->getType()->isFloatingPointTy()) {
      DSTr = Builder.CreateFAdd(LHSr, RHSr, "tmpr");
      DSTi = Builder.CreateFAdd(LHSi, RHSi, "tmpi");
    } else {
      DSTr = Builder.CreateAdd(LHSr, RHSr, "tmpr");
      DSTi = Builder.CreateAdd(LHSi, RHSi, "tmpi");
    }
    break;
  case MINUS_EXPR:  // (a+ib) - (c+id) = (a-c) + i(b-d)
    if (LHSr->getType()->isFloatingPointTy()) {
      DSTr = Builder.CreateFSub(LHSr, RHSr, "tmpr");
      DSTi = Builder.CreateFSub(LHSi, RHSi, "tmpi");
    } else {
      DSTr = Builder.CreateSub(LHSr, RHSr, "tmpr");
      DSTi = Builder.CreateSub(LHSi, RHSi, "tmpi");
    }
    break;
  case MULT_EXPR: { // (a+ib) * (c+id) = (ac-bd) + i(ad+bc)
    if (LHSr->getType()->isFloatingPointTy()) {
      Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr);  // a*c
      Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi);  // b*d
      DSTr = Builder.CreateFSub(Tmp1, Tmp2);         // ac - bd
      Value *Tmp3 = Builder.CreateFMul(LHSr, RHSi);  // a*d
      Value *Tmp4 = Builder.CreateFMul(RHSr, LHSi);  // c*b
      DSTi = Builder.CreateFAdd(Tmp3, Tmp4);         // ad + cb
    } else {
      Value *Tmp1 = Builder.CreateMul(LHSr, RHSr);   // a*c
      Value *Tmp2 = Builder.CreateMul(LHSi, RHSi);   // b*d
      DSTr = Builder.CreateSub(Tmp1, Tmp2);          // ac - bd
      Value *Tmp3 = Builder.CreateMul(LHSr, RHSi);   // a*d
      Value *Tmp4 = Builder.CreateMul(RHSr, LHSi);   // c*b
      DSTi = Builder.CreateAdd(Tmp3, Tmp4);          // ad + cb
    }
    break;
  }
  case RDIV_EXPR: { // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
    // NOTE: this is the naive textbook formula; it can spuriously
    // overflow/underflow for extreme inputs (no Smith's algorithm here).
    assert (LHSr->getType()->isFloatingPointTy());
    Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr);    // a*c
    Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi);    // b*d
    Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2);    // ac + bd
    Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr);    // c*c
    Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi);    // d*d
    Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5);    // cc + dd
    DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
    Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr);    // b*c
    Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi);    // a*d
    Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8);    // bc - ad
    DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
    break;
  }
  case EQ_EXPR:     // Complex == : both halves must compare equal.
    if (LHSr->getType()->isFloatingPointTy()) {
      DSTr = Builder.CreateFCmpOEQ(LHSr, RHSr, "tmpr");
      DSTi = Builder.CreateFCmpOEQ(LHSi, RHSi, "tmpi");
    } else {
      DSTr = Builder.CreateICmpEQ(LHSr, RHSr, "tmpr");
      DSTi = Builder.CreateICmpEQ(LHSi, RHSi, "tmpi");
    }
    return Builder.CreateAnd(DSTr, DSTi);
  case NE_EXPR:     // Complex != : either half differing suffices.
    if (LHSr->getType()->isFloatingPointTy()) {
      DSTr = Builder.CreateFCmpUNE(LHSr, RHSr, "tmpr");
      DSTi = Builder.CreateFCmpUNE(LHSi, RHSi, "tmpi");
    } else {
      DSTr = Builder.CreateICmpNE(LHSr, RHSr, "tmpr");
      DSTi = Builder.CreateICmpNE(LHSi, RHSi, "tmpi");
    }
    return Builder.CreateOr(DSTr, DSTi);
  }
  EmitStoreToComplex(*DestLoc, DSTr, DSTi);
  return 0;
}
/// getFieldOffsetInBits - Return the bit offset of a FIELD_DECL within its
/// containing record.  DECL_FIELD_BIT_OFFSET holds the sub-byte part and
/// DECL_FIELD_OFFSET the byte part, which is only folded in when constant.
static unsigned getFieldOffsetInBits(tree Field) {
  assert(DECL_FIELD_BIT_OFFSET(Field) != 0 && DECL_FIELD_OFFSET(Field) != 0);
  unsigned Bits = TREE_INT_CST_LOW(DECL_FIELD_BIT_OFFSET(Field));
  if (TREE_CODE(DECL_FIELD_OFFSET(Field)) == INTEGER_CST)
    Bits += TREE_INT_CST_LOW(DECL_FIELD_OFFSET(Field)) * 8;
  return Bits;
}
/// getComponentRefOffsetInBits - Return the bit offset of the field accessed
/// by a COMPONENT_REF.  Uses component_ref_field_offset for the byte part
/// (folded in only when it is constant) plus the field's bit offset.
static unsigned getComponentRefOffsetInBits(tree exp) {
  assert(TREE_CODE(exp) == COMPONENT_REF && "not a COMPONENT_REF!");
  tree FieldDecl = TREE_OPERAND(exp, 1);
  assert(TREE_CODE(FieldDecl) == FIELD_DECL && "not a FIELD_DECL!");
  tree ByteOffsetTree = component_ref_field_offset (exp);
  assert(DECL_FIELD_BIT_OFFSET(FieldDecl) && ByteOffsetTree);
  unsigned Bits = TREE_INT_CST_LOW(DECL_FIELD_BIT_OFFSET(FieldDecl));
  if (TREE_CODE(ByteOffsetTree) == INTEGER_CST)
    Bits += TREE_INT_CST_LOW(ByteOffsetTree) * 8;
  return Bits;
}
/// EmitFieldAnnotation - Wrap FieldPtr in calls to llvm.ptr.annotation, one
/// per string argument of each "annotate" attribute on FieldDecl, threading
/// the pointer through each call so users see the annotated pointer.
Value *TreeToLLVM::EmitFieldAnnotation(Value *FieldPtr, tree FieldDecl) {
  tree AnnotateAttr = lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl));
  // The intrinsic is overloaded on the pointer type; we use the i8* variant
  // and bitcast in/out around each call.
  const Type *SBP = Type::getInt8PtrTy(Context);
  Function *Fn = Intrinsic::getDeclaration(TheModule,
                                           Intrinsic::ptr_annotation,
                                           &SBP, 1);
  // Source-location arguments passed to every annotation call.
  Constant *LineNo = ConstantInt::get(Type::getInt32Ty(Context),
                                      DECL_SOURCE_LINE(FieldDecl));
  Constant *File = ConvertMetadataStringToGV(DECL_SOURCE_FILE(FieldDecl));
  File = TheFolder->CreateBitCast(File, SBP);
  // A decl may carry several "annotate" attributes, each with several string
  // arguments; emit one intrinsic call per string.
  while (AnnotateAttr) {
    tree args = TREE_VALUE(AnnotateAttr);
    for (tree a = args; a; a = TREE_CHAIN(a)) {
      tree val = TREE_VALUE(a);
      assert(TREE_CODE(val) == STRING_CST &&
             "Annotate attribute arg should always be a string");
      Constant *strGV = TreeConstantToLLVM::EmitLV_STRING_CST(val);
      BitCastInst *CastFieldPtr = new BitCastInst(FieldPtr, SBP,
                                                  FieldPtr->getName());
      Builder.Insert(CastFieldPtr);
      Value *Ops[4] = {
        CastFieldPtr, BitCastToType(strGV, SBP),
        File, LineNo
      };
      // The intrinsic returns i8*; cast back to the field's pointer type so
      // subsequent annotations (and the caller) see the original type.
      const Type* FieldPtrType = FieldPtr->getType();
      FieldPtr = Builder.CreateCall(Fn, Ops, Ops+4);
      FieldPtr = BitCastToType(FieldPtr, FieldPtrType);
    }
    // Move on to any further "annotate" attributes on the decl.
    AnnotateAttr = TREE_CHAIN(AnnotateAttr);
    if (AnnotateAttr)
      AnnotateAttr = lookup_attribute("annotate", AnnotateAttr);
  }
  return FieldPtr;
}
/// EmitLV_ARRAY_REF - Return the address of an array element as an lvalue,
/// tracking the best known alignment of that address.
LValue TreeToLLVM::EmitLV_ARRAY_REF(tree exp) {
  tree Array = TREE_OPERAND(exp, 0);
  tree ArrayTreeType = TREE_TYPE(Array);
  tree Index = TREE_OPERAND(exp, 1);
  tree IndexType = TREE_TYPE(Index);
  tree ElementType = TREE_TYPE(ArrayTreeType);
  assert((TREE_CODE (ArrayTreeType) == ARRAY_TYPE ||
          TREE_CODE (ArrayTreeType) == POINTER_TYPE ||
          TREE_CODE (ArrayTreeType) == REFERENCE_TYPE ||
          TREE_CODE (ArrayTreeType) == BLOCK_POINTER_TYPE) &&
         "Unknown ARRAY_REF!");
  Value *ArrayAddr;
  unsigned ArrayAlign;
  if (TREE_CODE(ArrayTreeType) == ARRAY_TYPE) {
    // True arrays: fold a non-zero lower bound into the index, then take the
    // array's own address.
    tree LowerBound = array_ref_low_bound(exp);
    if (!integer_zerop(LowerBound))
      Index = fold(build2(MINUS_EXPR, IndexType, Index, LowerBound));
    LValue ArrayAddrLV = EmitLV(Array);
    assert(!ArrayAddrLV.isBitfield() && "Arrays cannot be bitfields!");
    ArrayAddr = ArrayAddrLV.Ptr;
    ArrayAlign = ArrayAddrLV.getAlignment();
  } else {
    // Pointer/reference: the operand's value is the base address.
    ArrayAddr = Emit(Array, 0);
    if (TREE_CODE (ArrayTreeType) == POINTER_TYPE)
      ArrayAlign = getPointerAlignment(Array);
    else
      ArrayAlign = 1;
  }
  // Normalize the index to the pointer-sized integer type, extending
  // according to the index's signedness.
  Value *IndexVal = Emit(Index, 0);
  const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
  if (TYPE_UNSIGNED(IndexType)) IndexVal = CastToUIntType(IndexVal, IntPtrTy);
  else
    IndexVal = CastToSIntType(IndexVal, IntPtrTy);
  if (isSequentialCompatible(ArrayTreeType)) {
    // The LLVM type's layout matches GCC's, so a plain GEP is enough.
    SmallVector<Value*, 2> Idx;
    if (TREE_CODE(ArrayTreeType) == ARRAY_TYPE)
      Idx.push_back(ConstantInt::get(IntPtrTy, 0));
    Idx.push_back(IndexVal);
    // Under -fwrapv index arithmetic may wrap, so "inbounds" is not valid.
    Value *Ptr = flag_wrapv ?
      Builder.CreateGEP(ArrayAddr, Idx.begin(), Idx.end()) :
      Builder.CreateInBoundsGEP(ArrayAddr, Idx.begin(), Idx.end());
    const Type *ElementTy = ConvertType(ElementType);
    unsigned Alignment = MinAlign(ArrayAlign, TD.getABITypeAlignment(ElementTy));
    return LValue(BitCastToType(Ptr,
                                ConvertType(TREE_TYPE(exp))->getPointerTo()),
                  Alignment);
  }
  // Layouts differ: do manual byte arithmetic with GCC's element size.
  ArrayAddr = BitCastToType(ArrayAddr,
                            Type::getInt8PtrTy(Context));
  if (VOID_TYPE_P(TREE_TYPE(ArrayTreeType)))
    return LValue(Builder.CreateGEP(ArrayAddr, IndexVal), 1);
  Value *TypeSize = Emit(array_ref_element_size(exp), 0);
  TypeSize = CastToUIntType(TypeSize, IntPtrTy);
  IndexVal = Builder.CreateMul(IndexVal, TypeSize);
  // Only a constant byte offset lets us say anything about alignment.
  unsigned Alignment = 1;
  if (isa<ConstantInt>(IndexVal))
    Alignment = MinAlign(ArrayAlign,
                         cast<ConstantInt>(IndexVal)->getZExtValue());
  Value *Ptr = flag_wrapv ?
    Builder.CreateGEP(ArrayAddr, IndexVal) :
    Builder.CreateInBoundsGEP(ArrayAddr, IndexVal);
  return LValue(BitCastToType(Ptr, ConvertType(TREE_TYPE(exp))->getPointerTo()),
                Alignment);
}
/// EmitLV_BIT_FIELD_REF - Return an lvalue for a BIT_FIELD_REF: operand 1 is
/// the bit size and operand 2 the bit offset of the referenced bits.
LValue TreeToLLVM::EmitLV_BIT_FIELD_REF(tree exp) {
  LValue Ptr = EmitLV(TREE_OPERAND(exp, 0));
  assert(!Ptr.isBitfield() && "BIT_FIELD_REF operands cannot be bitfields!");
  unsigned BitStart = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 2));
  unsigned BitSize = (unsigned)TREE_INT_CST_LOW(TREE_OPERAND(exp, 1));
  const Type *ValTy = ConvertType(TREE_TYPE(exp));
  unsigned ValueSizeInBits = TD.getTypeSizeInBits(ValTy);
  assert(BitSize <= ValueSizeInBits &&
         "ValTy isn't large enough to hold the value loaded!");
  assert(ValueSizeInBits == TD.getTypeAllocSizeInBits(ValTy) &&
         "FIXME: BIT_FIELD_REF logic is broken for non-round types");
  // Step the base pointer forward by whole ValTy-sized units so that
  // BitStart falls within a single unit.
  if (unsigned UnitOffset = BitStart / ValueSizeInBits) {
    Ptr.Ptr = BitCastToType(Ptr.Ptr, ValTy->getPointerTo());
    Ptr.Ptr = Builder.CreateGEP(Ptr.Ptr,
                    ConstantInt::get(Type::getInt32Ty(Context), UnitOffset));
    BitStart -= UnitOffset*ValueSizeInBits;
  }
  // A reference covering exactly one whole unit is an ordinary lvalue.
  if (BitStart == 0 && BitSize == ValueSizeInBits) {
    return LValue(BitCastToType(Ptr.Ptr, ValTy->getPointerTo()),
                  Ptr.getAlignment());
  }
  // Otherwise hand back a bitfield lvalue; alignment is conservatively 1.
  return LValue(BitCastToType(Ptr.Ptr, ValTy->getPointerTo()), 1,
                BitStart, BitSize);
}
/// EmitLV_COMPONENT_REF - Return an lvalue for a struct/union member access.
/// Handles both constant-offset fields (via struct GEP) and variable-offset
/// fields (via pointer arithmetic), plus the bitfield case where the result
/// is a bitfield lvalue rather than a plain pointer.
LValue TreeToLLVM::EmitLV_COMPONENT_REF(tree exp) {
  LValue StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
  tree FieldDecl = TREE_OPERAND(exp, 1);
  unsigned LVAlign = StructAddrLV.getAlignment();
  assert((TREE_CODE(DECL_CONTEXT(FieldDecl)) == RECORD_TYPE ||
          TREE_CODE(DECL_CONTEXT(FieldDecl)) == UNION_TYPE ||
          TREE_CODE(DECL_CONTEXT(FieldDecl)) == QUAL_UNION_TYPE));
  const Type *StructTy = ConvertType(DECL_CONTEXT(FieldDecl));
  assert((!StructAddrLV.isBitfield() ||
          StructAddrLV.BitStart == 0) && "structs cannot be bitfields!");
  StructAddrLV.Ptr = BitCastToType(StructAddrLV.Ptr,
                                   StructTy->getPointerTo());
  const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
  // BitStart is the field's offset in bits from the struct start; it is
  // reduced below as byte offsets are folded into the pointer.
  unsigned BitStart = getComponentRefOffsetInBits(exp);
  Value *FieldPtr;
  unsigned ByteOffset = 0;
  tree field_offset = component_ref_field_offset (exp);
  if (TREE_CODE(field_offset) == INTEGER_CST) {
    // Constant offset: address the field through its LLVM struct index.
    unsigned int MemberIndex = GET_LLVM_FIELD_INDEX(FieldDecl);
    FieldPtr = StructAddrLV.Ptr;
    if (StructTy->getNumContainedTypes() != 0) {
      assert(MemberIndex < StructTy->getNumContainedTypes() &&
             "Field Idx out of range!");
      FieldPtr = Builder.CreateStructGEP(FieldPtr, MemberIndex);
    }
    if (MemberIndex) {
      // Fold the LLVM element's byte offset out of BitStart, and reduce the
      // known alignment accordingly.
      const StructLayout *SL = TD.getStructLayout(cast<StructType>(StructTy));
      ByteOffset = SL->getElementOffset(MemberIndex);
      BitStart -= ByteOffset * 8;
      LVAlign = MinAlign(LVAlign, ByteOffset);
    }
#if 0
    if (BitStart == 0 && !isBitfield(FieldDecl) && DECL_ALIGN(FieldDecl))
      LVAlign = std::max(LVAlign, unsigned(DECL_ALIGN(FieldDecl)) / 8);
#endif
    if (lookup_attribute("annotate", DECL_ATTRIBUTES(FieldDecl)))
      FieldPtr = EmitFieldAnnotation(FieldPtr, FieldDecl);
  } else {
    // Variable offset: compute the address with explicit integer arithmetic.
    Value *Offset = Emit(field_offset, 0);
    // ObjC v2 ivars may carry a separate constant bit position.
    tree field_bit_offset = objc_v2_bitfield_ivar_bitpos(exp);
    if (field_bit_offset) {
      BitStart = (unsigned)getINTEGER_CSTVal(field_bit_offset);
    }
    // Fold whole bytes of BitStart into the runtime offset.
    ByteOffset = BitStart/8;
    if (ByteOffset > 0) {
      Offset = Builder.CreateAdd(Offset,
        ConstantInt::get(Offset->getType(), ByteOffset));
      BitStart -= ByteOffset*8;
      LVAlign = MinAlign(LVAlign, ByteOffset);
    }
    FieldTy = ConvertType(TREE_TYPE(FieldDecl));
    Value *Ptr = CastToType(Instruction::PtrToInt, StructAddrLV.Ptr,
                            Offset->getType());
    Ptr = Builder.CreateAdd(Ptr, Offset);
    FieldPtr = CastToType(Instruction::IntToPtr, Ptr,
                          FieldTy->getPointerTo());
  }
  if (isBitfield(FieldDecl)) {
    assert(FieldTy->isIntegerTy() && "Invalid bitfield");
    assert(DECL_SIZE(FieldDecl) &&
           TREE_CODE(DECL_SIZE(FieldDecl)) == INTEGER_CST &&
           "Variable sized bitfield?");
    unsigned BitfieldSize = TREE_INT_CST_LOW(DECL_SIZE(FieldDecl));
    const Type *LLVMFieldTy =
      cast<PointerType>(FieldPtr->getType())->getElementType();
    tree gccContext = DECL_FIELD_CONTEXT(FieldDecl);
    tree gccSize = TYPE_SIZE(gccContext);
    unsigned int gccStructSize = TREE_CODE(gccSize) == INTEGER_CST ?
      TREE_INT_CST_LOW(gccSize) : 1;
    bool piecemeal = false;
    // Choose the integer type through which the bitfield is accessed:
    if (ByteOffset * 8 + TD.getTypeAllocSizeInBits(FieldTy) > gccStructSize) {
      // Accessing a whole unit would read past the end of the GCC struct;
      // round the bitfield size itself up to whole bytes instead.
      unsigned int byteAlignedBitfieldSize = (BitfieldSize % 8) ?
        ((BitfieldSize / 8) + 1) * 8 : BitfieldSize;
      FieldTy = Type::getIntNTy(Context, byteAlignedBitfieldSize);
      piecemeal = true;
    } else if (LLVMFieldTy->isIntegerTy() &&
               LLVMFieldTy->getPrimitiveSizeInBits() >= BitStart + BitfieldSize &&
               LLVMFieldTy->getPrimitiveSizeInBits() ==
               TD.getTypeAllocSizeInBits(LLVMFieldTy))
      // The LLVM field's own integer type already covers all the bits.
      FieldTy = LLVMFieldTy;
    else
      FieldTy = IntegerType::get(Context, TD.getTypeAllocSizeInBits(FieldTy));
    assert((piecemeal || (FieldTy->getPrimitiveSizeInBits() ==
                          TD.getTypeAllocSizeInBits(FieldTy))) &&
           "Field type not sequential!");
    FieldPtr = BitCastToType(FieldPtr, FieldTy->getPointerTo());
    unsigned LLVMValueBitSize = FieldTy->getPrimitiveSizeInBits();
    if (BitStart >= LLVMValueBitSize) {
      // The bits start beyond the access unit: advance the pointer by whole
      // alignment units until BitStart falls within one unit.
      unsigned ByteAlignment = TD.getABITypeAlignment(FieldTy);
      if (DECL_PACKED(FieldDecl))
        ByteAlignment = 1;
      assert(ByteAlignment*8 <= LLVMValueBitSize && "Unknown overlap case!");
      unsigned NumAlignmentUnits = BitStart/(ByteAlignment*8);
      assert(NumAlignmentUnits && "Not adjusting pointer?");
      unsigned ByteOffset = NumAlignmentUnits*ByteAlignment;
      LVAlign = MinAlign(LVAlign, ByteOffset);
      Constant *Offset = ConstantInt::get(TD.getIntPtrType(Context), ByteOffset);
      FieldPtr = CastToType(Instruction::PtrToInt, FieldPtr,
                            Offset->getType());
      FieldPtr = Builder.CreateAdd(FieldPtr, Offset);
      FieldPtr = CastToType(Instruction::IntToPtr, FieldPtr,
                            FieldTy->getPointerTo());
      BitStart -= ByteOffset*8;
      assert(BitStart < LLVMValueBitSize &&
             BitStart+BitfieldSize < 2*LLVMValueBitSize &&
             "Couldn't get bitfield into value!");
    }
    // Unless the bitfield exactly fills the access unit, return a bitfield
    // lvalue that records the sub-word position.
    if (BitfieldSize != LLVMValueBitSize || BitStart != 0)
      return LValue(FieldPtr, LVAlign, BitStart, BitfieldSize);
  } else {
    // Ordinary field: just point at it with the expression's type.
    const Type *EltTy = ConvertType(TREE_TYPE(exp));
    FieldPtr = BitCastToType(FieldPtr, EltTy->getPointerTo());
  }
  assert(BitStart == 0 &&
         "It's a bitfield reference or we didn't get to the field!");
  return LValue(FieldPtr, LVAlign);
}
/// EmitLV_DECL - Return the address of a declaration (parameter, variable,
/// constant, function, ...) as an lvalue with its best known alignment.
LValue TreeToLLVM::EmitLV_DECL(tree exp) {
  if (TREE_CODE(exp) == PARM_DECL || TREE_CODE(exp) == VAR_DECL ||
      TREE_CODE(exp) == CONST_DECL) {
    // If layout of the decl was deferred (no size yet on a static/external
    // with a complete or unbound-array type), do it now.
    if (DECL_SIZE(exp) == 0 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P(TREE_TYPE(exp))
        && (TREE_STATIC(exp) || DECL_EXTERNAL(exp))) {
      layout_decl(exp, 0);
#if 0
      if (Value *Val = DECL_LLVM_IF_SET(exp)) {
        SET_DECL_LLVM(exp, 0);
        llvm_assemble_external(exp);
        V2GV(Val)->ForwardedGlobal = V2GV(DECL_LLVM(exp));
      }
#endif
    }
  }
  assert(!isGimpleTemporary(exp) &&
         "Cannot use a gimple temporary as an l-value");
  Value *Decl = DECL_LLVM(exp);
  if (Decl == 0) {
    // After a front-end error the decl may never have been emitted; return a
    // harmless null lvalue instead of crashing.
    if (errorcount || sorrycount) {
      const Type *Ty = ConvertType(TREE_TYPE(exp));
      const PointerType *PTy = Ty->getPointerTo();
      LValue LV(ConstantPointerNull::get(PTy), 1);
      return LV;
    }
    assert(0 && "INTERNAL ERROR: Referencing decl that hasn't been laid out");
    abort();
  }
  // First use: notify the assembler machinery.  This can change DECL_LLVM,
  // so reload it afterwards.
  if (!TREE_USED(exp)) {
    assemble_external(exp);
    TREE_USED(exp) = 1;
    Decl = DECL_LLVM(exp);
  }
  if (GlobalValue *GV = dyn_cast<GlobalValue>(Decl)) {
    if (TREE_CODE(exp) == CONST_DECL || TREE_CODE(exp) == VAR_DECL) {
      // A global with an initializer (or non-public linkage) that is still
      // only a declaration has not been emitted yet; emit it now.
      if ((DECL_INITIAL(exp) || !TREE_PUBLIC(exp)) && !DECL_EXTERNAL(exp) &&
          GV->isDeclaration() &&
          !BOGUS_CTOR(exp)) {
        emit_global_to_llvm(exp);
        Decl = DECL_LLVM(exp);
      }
    } else {
      // Other decls (e.g. functions): record the reference for GCC.
      mark_decl_referenced(exp);
      if (tree ID = DECL_ASSEMBLER_NAME(exp))
        mark_referenced(ID);
    }
  }
  const Type *Ty = ConvertType(TREE_TYPE(exp));
  // void lvalues get an empty struct type so they still have an address.
  if (Ty->isVoidTy()) Ty = StructType::get(Context);
  const PointerType *PTy = Ty->getPointerTo();
  unsigned Alignment = Ty->isSized() ? TD.getABITypeAlignment(Ty) : 1;
  if (DECL_ALIGN(exp)) {
    // Honor user-specified or stricter GCC alignment (DECL_ALIGN is in bits).
    if (DECL_USER_ALIGN(exp) || 8 * Alignment < (unsigned)DECL_ALIGN(exp))
      Alignment = DECL_ALIGN(exp) / 8;
  }
  // If the decl is an LLVM function argument, clamp to the declared
  // parameter alignment (which may be weaker than the type's).
  if (Argument *Arg = dyn_cast<Argument>(Decl)) {
    unsigned pa = Arg->getParent()->getParamAlignment(Arg->getArgNo()+1);
    if (pa && pa < Alignment)
      Alignment = pa;
  }
  return LValue(BitCastToType(Decl, PTy), Alignment);
}
/// EmitLV_EXC_PTR_EXPR - Return the exception pointer slot as an lvalue of
/// the expression's type.
LValue TreeToLLVM::EmitLV_EXC_PTR_EXPR(tree exp) {
  // Make sure the exception-handling locals exist.
  CreateExceptionValues();
  const Type *SlotTy =
    cast<PointerType>(ExceptionValue->getType())->getElementType();
  unsigned Align = TD.getABITypeAlignment(SlotTy);
  Value *Ptr = BitCastToType(ExceptionValue,
                             ConvertType(TREE_TYPE(exp))->getPointerTo());
  return LValue(Ptr, Align);
}
/// EmitLV_FILTER_EXPR - Return the exception selector slot as an lvalue.
LValue TreeToLLVM::EmitLV_FILTER_EXPR(tree exp) {
  // Make sure the exception-handling locals exist.
  CreateExceptionValues();
  const Type *SelTy =
    cast<PointerType>(ExceptionSelectorValue->getType())->getElementType();
  return LValue(ExceptionSelectorValue, TD.getABITypeAlignment(SelTy));
}
/// EmitLV_INDIRECT_REF - The lvalue of *p is simply the value of p, retyped
/// to point at the dereferenced type.  expr_align reports bits, so divide.
LValue TreeToLLVM::EmitLV_INDIRECT_REF(tree exp) {
  Value *Addr = Emit(TREE_OPERAND(exp, 0), 0);
  LValue LV(Addr, expr_align(exp) / 8);
  LV.Ptr = BitCastToType(LV.Ptr, ConvertType(TREE_TYPE(exp))->getPointerTo());
  return LV;
}
/// EmitLV_VIEW_CONVERT_EXPR - Reinterpret the operand's storage as the
/// expression's type without changing any bits.
LValue TreeToLLVM::EmitLV_VIEW_CONVERT_EXPR(tree exp) {
  tree Op = TREE_OPERAND(exp, 0);

  if (!isAggregateTreeType(TREE_TYPE(Op))) {
    // Scalar operand: spill it to a temporary so it has an address, then
    // view that address as the destination type.
    Value *Tmp = CreateTemporary(ConvertType(TREE_TYPE(Op)));
    Builder.CreateStore(Emit(Op, 0), Tmp);
    Tmp = BitCastToType(Tmp, ConvertType(TREE_TYPE(exp))->getPointerTo());
    return LValue(Tmp, 1);
  }

  // Aggregates already live in memory: just repoint the lvalue's type.
  LValue LV = EmitLV(Op);
  LV.Ptr = BitCastToType(LV.Ptr, ConvertType(TREE_TYPE(exp))->getPointerTo());
  return LV;
}
/// EmitLV_WITH_SIZE_EXPR - A WITH_SIZE_EXPR merely wraps its first operand;
/// its lvalue is the operand's lvalue.
LValue TreeToLLVM::EmitLV_WITH_SIZE_EXPR(tree exp) {
  tree Wrapped = TREE_OPERAND(exp, 0);
  return EmitLV(Wrapped);
}
/// EmitLV_XXXXPART_EXPR - Return an lvalue for the real (Idx == 0) or
/// imaginary (Idx == 1) half of a complex lvalue.
LValue TreeToLLVM::EmitLV_XXXXPART_EXPR(tree exp, unsigned Idx) {
  LValue Ptr = EmitLV(TREE_OPERAND(exp, 0));
  assert(!Ptr.isBitfield() &&
         "REALPART_EXPR / IMAGPART_EXPR operands cannot be bitfields!");
  unsigned Alignment;
  if (Idx == 0)
    // REALPART is at offset 0, so it inherits the complex value's alignment.
    Alignment = Ptr.getAlignment();
  else {
    // IMAGPART is offset by the allocation size of the real part, so its
    // alignment is limited by that offset.  (Previously this measured
    // getTypeAllocSize(Ptr.Ptr->getType()) — the size of the *pointer* —
    // which could overstate alignment, e.g. for complex float on a 64-bit
    // target where the imag offset is 4 but the pointer size is 8.  This
    // now matches EmitLoadFromComplex, which uses the real part's size.)
    const StructType *ComplexTy =
      cast<StructType>(cast<PointerType>(Ptr.Ptr->getType())->getElementType());
    Alignment = MinAlign(Ptr.getAlignment(),
                         TD.getTypeAllocSize(ComplexTy->getElementType(0)));
  }
  return LValue(Builder.CreateStructGEP(Ptr.Ptr, Idx), Alignment);
}
/// EmitLV_LABEL_DECL - The address of a local label is a blockaddress
/// constant referring to the label's basic block in the current function.
Constant *TreeToLLVM::EmitLV_LABEL_DECL(tree exp) {
  BasicBlock *LabelBB = getLabelDeclBlock(exp);
  return BlockAddress::get(Fn, LabelBB);
}
/// EmitCONSTRUCTOR - Emit a CONSTRUCTOR node.  Vectors are built as an SSA
/// value and returned; aggregates are zero-initialized in *DestLoc and then
/// the explicitly initialized parts are written, returning 0.
Value *TreeToLLVM::EmitCONSTRUCTOR(tree exp, const MemRef *DestLoc) {
  tree type = TREE_TYPE(exp);
  const Type *Ty = ConvertType(type);
  if (const VectorType *PTy = dyn_cast<VectorType>(Ty)) {
    // Vector constructor: start from all-zero elements and overwrite the
    // ones given explicit initializers.
    assert(DestLoc == 0 && "Dest location for packed value?");
    std::vector<Value *> BuildVecOps;
    Constant *Zero = Constant::getNullValue(PTy->getElementType());
    BuildVecOps.resize(cast<VectorType>(Ty)->getNumElements(), Zero);
    unsigned HOST_WIDE_INT ix;
    tree purpose, value;
    FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), ix, purpose, value) {
      // Elements without an index keep their zero default.
      if (!purpose) continue;
      unsigned FieldNo = TREE_INT_CST_LOW(purpose);
      if (FieldNo < BuildVecOps.size())
        BuildVecOps[FieldNo] = Emit(value, 0);
    }
    return BuildVector(BuildVecOps);
  }
  assert(!Ty->isSingleValueType() && "Constructor for scalar type??");
  // Aggregates: zero the destination first; explicit initializers below
  // only overwrite what they cover.
  EmitAggregateZero(*DestLoc, type);
  VEC(constructor_elt, gc) *elt = CONSTRUCTOR_ELTS(exp);
  switch (TREE_CODE(TREE_TYPE(exp))) {
  case ARRAY_TYPE:
  case RECORD_TYPE:
  default:
    // Non-empty array/record constructors are not handled here.
    if (elt && VEC_length(constructor_elt, elt)) {
      TODO(exp);
    }
    return 0;
  case QUAL_UNION_TYPE:
  case UNION_TYPE:
    // A union constructor initializes at most one member.
    if (!elt || VEC_empty(constructor_elt, elt)) return 0;
    assert(VEC_length(constructor_elt, elt) == 1
           && "Union CONSTRUCTOR should have one element!");
    tree tree_purpose = VEC_index(constructor_elt, elt, 0)->index;
    tree tree_value = VEC_index(constructor_elt, elt, 0)->value;
    if (!tree_purpose)
      return 0;
    if (!ConvertType(TREE_TYPE(tree_purpose))->isSingleValueType()) {
      // Aggregate member: emit it directly into the destination.
      Value *V = Emit(tree_value, DestLoc);
      assert(V == 0 && "Aggregate value returned in a register?");
    } else {
      // Scalar member: store through a pointer of the member's own type.
      Value *V = Emit(tree_value, 0);
      Value *Ptr = BitCastToType(DestLoc->Ptr, V->getType()->getPointerTo());
      StoreInst *St = Builder.CreateStore(V, Ptr, DestLoc->Volatile);
      St->setAlignment(DestLoc->getAlignment());
    }
    break;
  }
  return 0;
}
/// Convert - Convert a GCC constant expression tree into an LLVM Constant,
/// dispatching on the tree code.
Constant *TreeConstantToLLVM::Convert(tree exp) {
  // Let the language front end lower language-specific constants first.
  exp = lang_hooks.expand_constant (exp);
  assert((TREE_CONSTANT(exp) || TREE_CODE(exp) == STRING_CST) &&
         "Isn't a constant!");
  switch (TREE_CODE(exp)) {
  case INTEGER_CST:       return ConvertINTEGER_CST(exp);
  case REAL_CST:          return ConvertREAL_CST(exp);
  case VECTOR_CST:        return ConvertVECTOR_CST(exp);
  case STRING_CST:        return ConvertSTRING_CST(exp);
  case COMPLEX_CST:       return ConvertCOMPLEX_CST(exp);
  case NOP_EXPR:          return ConvertNOP_EXPR(exp);
  case CONVERT_EXPR:      return ConvertCONVERT_EXPR(exp);
  case PLUS_EXPR:
  case MINUS_EXPR:        return ConvertBinOp_CST(exp);
  case CONSTRUCTOR:       return ConvertCONSTRUCTOR(exp);
  // A VIEW_CONVERT_EXPR is a no-op at the constant level.
  case VIEW_CONVERT_EXPR: return Convert(TREE_OPERAND(exp, 0));
  case ADDR_EXPR:
    // The address of a constant is its lvalue, cast to the expression type.
    return TheFolder->CreateBitCast(EmitLV(TREE_OPERAND(exp, 0)),
                                    ConvertType(TREE_TYPE(exp)));
  case FDESC_EXPR:
  default:
    debug_tree(exp);
    assert(0 && "Unknown constant to convert!");
    abort();
  }
}
/// ConvertINTEGER_CST - Convert an INTEGER_CST tree into an LLVM constant of
/// the tree's converted type.
Constant *TreeConstantToLLVM::ConvertINTEGER_CST(tree exp) {
  const Type *Ty = ConvertType(TREE_TYPE(exp));

  // i128 needs both HOST_WIDE_INT halves of the tree constant.
  if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty))
    if (ITy->getBitWidth() == 128) {
      assert(HOST_BITS_PER_WIDE_INT == 64 &&
             "i128 only supported on 64-bit system");
      uint64_t Words[] = { TREE_INT_CST_LOW(exp), TREE_INT_CST_HIGH(exp) };
      return ConstantInt::get(Context, APInt(128, 2, Words));
    }

  // Build the raw value as an i64, then fold a cast to the real type.  The
  // i64 is treated as unsigned; the destination signedness follows the tree.
  ConstantInt *C = ConstantInt::get(Type::getInt64Ty(Context),
                                    getINTEGER_CSTVal(exp));
  Instruction::CastOps opcode =
    CastInst::getCastOpcode(C, false, Ty, !TYPE_UNSIGNED(TREE_TYPE(exp)));
  return TheFolder->CreateCast(opcode, C, Ty);
}
/// ConvertREAL_CST - Convert a REAL_CST into an LLVM floating point constant
/// by going through the target's binary representation (REAL_VALUE_TO_*),
/// correcting for host/target word-order differences along the way.
Constant *TreeConstantToLLVM::ConvertREAL_CST(tree exp) {
  const Type *Ty = ConvertType(TREE_TYPE(exp));
  assert(Ty->isFloatingPointTy() && "Integer REAL_CST?");
  long RealArr[2];
  // Anonymous union used to type-pun the two 32-bit target words into a
  // host double.
  union {
    int UArr[2];
    double V;
  };
  if (Ty->isFloatTy() || Ty->isDoubleTy()) {
    // Round through the target double representation, swapping words when
    // the host's endianness disagrees with the target float word order.
    REAL_VALUE_TO_TARGET_DOUBLE(TREE_REAL_CST(exp), RealArr);
    UArr[0] = RealArr[0]; UArr[1] = RealArr[1];
    if (llvm::sys::isBigEndianHost() != FLOAT_WORDS_BIG_ENDIAN)
      std::swap(UArr[0], UArr[1]);
    return
      ConstantFP::get(Context, Ty->isFloatTy() ?
                      APFloat((float)V) : APFloat(V));
  } else if (Ty->isX86_FP80Ty()) {
    // 80-bit x87 long double: pack the 64-bit mantissa word and the 16-bit
    // sign/exponent word into an 80-bit APInt.
    long RealArr[4];
    uint64_t UArr[2];
    REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
    UArr[0] = ((uint64_t)((uint32_t)RealArr[0])) |
              ((uint64_t)((uint32_t)RealArr[1]) << 32);
    UArr[1] = (uint16_t)RealArr[2];
    return ConstantFP::get(Context, APFloat(APInt(80, 2, UArr)));
  } else if (Ty->isPPC_FP128Ty() ||
             Ty->isFP128Ty()) {
    // 128-bit formats: assemble the four 32-bit target words into two
    // 64-bit APInt words.
    long RealArr[4];
    uint64_t UArr[2];
    REAL_VALUE_TO_TARGET_LONG_DOUBLE(TREE_REAL_CST(exp), RealArr);
    UArr[0] = ((uint64_t)((uint32_t)RealArr[0]) << 32) |
              ((uint64_t)((uint32_t)RealArr[1]));
    UArr[1] = ((uint64_t)((uint32_t)RealArr[2]) << 32) |
              ((uint64_t)((uint32_t)RealArr[3]));
    return ConstantFP::get(Context,
                           APFloat(APInt(128, 2, UArr),
                                   Ty->isFP128Ty()));
  }
  assert(0 && "Floating point type not handled yet");
  return 0;
}
/// ConvertVECTOR_CST - Convert a VECTOR_CST into an LLVM constant vector,
/// zero-padding any trailing elements GCC omitted.
Constant *TreeConstantToLLVM::ConvertVECTOR_CST(tree exp) {
  // No element list at all means the all-zero vector.
  if (!TREE_VECTOR_CST_ELTS(exp))
    return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));

  std::vector<Constant*> Ops;
  for (tree elt = TREE_VECTOR_CST_ELTS(exp); elt; elt = TREE_CHAIN(elt))
    Ops.push_back(Convert(TREE_VALUE(elt)));

  // GCC may leave off trailing zero elements; pad to the full vector width.
  unsigned NumParts = TYPE_VECTOR_SUBPARTS(TREE_TYPE(exp));
  if (Ops.size() < NumParts) {
    tree EltTreeType = TREE_TYPE(TREE_TYPE(exp));
    Constant *Zero = Constant::getNullValue(ConvertType(EltTreeType));
    Ops.resize(NumParts, Zero);
  }
  return ConstantVector::get(Ops);
}
/// ConvertSTRING_CST - Convert a STRING_CST (narrow or wide) into an LLVM
/// constant array, handling host/target byte-order differences and
/// reconciling the literal's length with the declared array size.
Constant *TreeConstantToLLVM::ConvertSTRING_CST(tree exp) {
  const ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
  const Type *ElTy = StrTy->getElementType();
  // TREE_STRING_LENGTH is always in bytes, regardless of element width.
  unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
  std::vector<Constant*> Elts;
  if (ElTy->isIntegerTy(8)) {
    const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
    for (unsigned i = 0; i != Len; ++i)
      Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
  } else if (ElTy->isIntegerTy(16)) {
    // 16-bit characters are stored in target byte order; byte-swap when the
    // host disagrees with the target.
    assert((Len&1) == 0 &&
           "Length in bytes should be a multiple of element size");
    const uint16_t *InStr =
      (const unsigned short *)TREE_STRING_POINTER(exp);
    for (unsigned i = 0; i != Len/2; ++i) {
      if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
        Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), InStr[i]));
      else
        Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), ByteSwap_16(InStr[i])));
    }
  } else if (ElTy->isIntegerTy(32)) {
    // Same byte-order handling for 32-bit characters.
    assert((Len&3) == 0 &&
           "Length in bytes should be a multiple of element size");
    const uint32_t *InStr = (const uint32_t *)TREE_STRING_POINTER(exp);
    for (unsigned i = 0; i != Len/4; ++i) {
      if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
        Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), InStr[i]));
      else
        Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), ByteSwap_32(InStr[i])));
    }
  } else {
    assert(0 && "Unknown character type!");
  }
  // Reconcile the string's element count with the declared array size.
  unsigned LenInElts = Len /
    TREE_INT_CST_LOW(TYPE_SIZE_UNIT(TREE_TYPE(TREE_TYPE(exp))));
  unsigned ConstantSize = StrTy->getNumElements();
  if (LenInElts != ConstantSize) {
    // A zero-length array type with no fixed upper bound takes its size
    // from the literal itself.
    if (ConstantSize == 0) {
      tree Domain = TYPE_DOMAIN(TREE_TYPE(exp));
      if (!Domain || !TYPE_MAX_VALUE(Domain)) {
        ConstantSize = LenInElts;
        StrTy = ArrayType::get(ElTy, LenInElts);
      }
    }
    if (ConstantSize < LenInElts) {
      // Declared array shorter than the literal: truncate.
      Elts.resize(ConstantSize);
    } else {
      // Declared array longer than the literal: zero-fill the tail.
      Constant *C = Constant::getNullValue(ElTy);
      for (; LenInElts != ConstantSize; ++LenInElts)
        Elts.push_back(C);
    }
  }
  return ConstantArray::get(StrTy, Elts);
}
/// ConvertCOMPLEX_CST - A complex constant becomes an unpacked two-element
/// struct { real, imag }.
Constant *TreeConstantToLLVM::ConvertCOMPLEX_CST(tree exp) {
  std::vector<Constant*> Parts;
  Parts.reserve(2);
  Parts.push_back(Convert(TREE_REALPART(exp)));
  Parts.push_back(Convert(TREE_IMAGPART(exp)));
  return ConstantStruct::get(Context, Parts, false);
}
/// ConvertNOP_EXPR - Fold the conversion a NOP_EXPR denotes, choosing the
/// cast opcode from the source/destination signedness.
Constant *TreeConstantToLLVM::ConvertNOP_EXPR(tree exp) {
  Constant *Operand = Convert(TREE_OPERAND(exp, 0));
  const Type *DestTy = ConvertType(TREE_TYPE(exp));

  // Aggregate "conversions" are no-ops at the constant level.
  if (!Operand->getType()->isSingleValueType() || !DestTy->isSingleValueType())
    return Operand;

  bool SrcIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
  bool DestIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
  Instruction::CastOps opc =
    CastInst::getCastOpcode(Operand, SrcIsSigned, DestTy, DestIsSigned);
  return TheFolder->CreateCast(opc, Operand, DestTy);
}
/// ConvertCONVERT_EXPR - Fold an explicit conversion, choosing the cast
/// opcode from the source/destination signedness.
Constant *TreeConstantToLLVM::ConvertCONVERT_EXPR(tree exp) {
  Constant *Operand = Convert(TREE_OPERAND(exp, 0));
  bool SrcIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp, 0)));
  const Type *DestTy = ConvertType(TREE_TYPE(exp));
  bool DestIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
  Instruction::CastOps opc =
    CastInst::getCastOpcode(Operand, SrcIsSigned, DestTy, DestIsSigned);
  return TheFolder->CreateCast(opc, Operand, DestTy);
}
/// ConvertBinOp_CST - Fold a constant PLUS_EXPR/MINUS_EXPR.  Pointer
/// arithmetic is first normalized into the target's intptr domain, and the
/// result is cast to the expression's converted type.
Constant *TreeConstantToLLVM::ConvertBinOp_CST(tree exp) {
  Constant *LHS = Convert(TREE_OPERAND(exp, 0));
  bool LHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,0)));
  Constant *RHS = Convert(TREE_OPERAND(exp, 1));
  bool RHSIsSigned = !TYPE_UNSIGNED(TREE_TYPE(TREE_OPERAND(exp,1)));
  Instruction::CastOps opcode;

  // Do pointer arithmetic as integer arithmetic on intptr values.
  if (LHS->getType()->isPointerTy()) {
    const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
    opcode = CastInst::getCastOpcode(LHS, LHSIsSigned, IntPtrTy, false);
    LHS = TheFolder->CreateCast(opcode, LHS, IntPtrTy);
    opcode = CastInst::getCastOpcode(RHS, RHSIsSigned, IntPtrTy, false);
    RHS = TheFolder->CreateCast(opcode, RHS, IntPtrTy);
  }

  Constant *Result;
  switch (TREE_CODE(exp)) {
  default: assert(0 && "Unexpected case!");
  case PLUS_EXPR:  Result = TheFolder->CreateAdd(LHS, RHS); break;
  case MINUS_EXPR: Result = TheFolder->CreateSub(LHS, RHS); break;
  }

  // Cast the folded result back to the expression's type.
  const Type *Ty = ConvertType(TREE_TYPE(exp));
  bool TyIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
  opcode = CastInst::getCastOpcode(Result, LHSIsSigned, Ty, TyIsSigned);
  return TheFolder->CreateCast(opcode, Result, Ty);
}
/// ConvertCONSTRUCTOR - Convert an aggregate constant initializer,
/// dispatching on the aggregate kind.
Constant *TreeConstantToLLVM::ConvertCONSTRUCTOR(tree exp) {
  // An empty initializer list means "all zeros".
  VEC(constructor_elt, gc) *EltVec = CONSTRUCTOR_ELTS(exp);
  if (EltVec == 0 || VEC_length(constructor_elt, EltVec) == 0)
    return Constant::getNullValue(ConvertType(TREE_TYPE(exp)));

  switch (TREE_CODE(TREE_TYPE(exp))) {
  default:
    debug_tree(exp);
    assert(0 && "Unknown ctor!");
    // Deliberate fall-through (release builds) into the array case.
  case VECTOR_TYPE:
  case ARRAY_TYPE:      return ConvertArrayCONSTRUCTOR(exp);
  case RECORD_TYPE:     return ConvertRecordCONSTRUCTOR(exp);
  case QUAL_UNION_TYPE:
  case UNION_TYPE:      return ConvertUnionCONSTRUCTOR(exp);
  }
}
/// ConvertArrayCONSTRUCTOR - Convert an array or vector constructor into an
/// LLVM constant, honoring explicit indexes and RANGE_EXPRs, zero-filling
/// unset slots, and falling back to a struct when element types differ.
Constant *TreeConstantToLLVM::ConvertArrayCONSTRUCTOR(tree exp) {
  tree InitType = TREE_TYPE(exp);
  tree min_element = size_zero_node;
  std::vector<Constant*> ResultElts;
  if (TREE_CODE(InitType) == VECTOR_TYPE) {
    // Vectors always have a known element count.
    ResultElts.resize(TYPE_VECTOR_SUBPARTS(InitType));
  } else {
    assert(TREE_CODE(InitType) == ARRAY_TYPE && "Unknown type for init");
    // Pre-size the result from the array domain when the bounds are known
    // constants.  Indexes below are made relative to min_element.
    tree Domain = TYPE_DOMAIN(InitType);
    if (Domain && TYPE_MIN_VALUE(Domain))
      min_element = fold_convert(sizetype, TYPE_MIN_VALUE(Domain));
    if (Domain && TYPE_MAX_VALUE(Domain)) {
      tree max_element = fold_convert(sizetype, TYPE_MAX_VALUE(Domain));
      tree size = size_binop (MINUS_EXPR, max_element, min_element);
      size = size_binop (PLUS_EXPR, size, size_one_node);
      if (host_integerp(size, 1))
        ResultElts.resize(tree_low_cst(size, 1));
    }
  }
  unsigned NextFieldToFill = 0;
  unsigned HOST_WIDE_INT ix;
  tree elt_index, elt_value;
  Constant *SomeVal = 0;
  FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), ix, elt_index, elt_value) {
    Constant *Val = Convert(elt_value);
    SomeVal = Val;
    // Determine which slot(s) this initializer fills: a RANGE_EXPR fills
    // [first, last], an explicit index fills exactly one slot, and no index
    // means the next sequential slot.
    tree index = elt_index;
    unsigned FieldOffset, FieldLastOffset;
    if (index && TREE_CODE(index) == RANGE_EXPR) {
      tree first = fold_convert (sizetype, TREE_OPERAND(index, 0));
      tree last = fold_convert (sizetype, TREE_OPERAND(index, 1));
      first = size_binop (MINUS_EXPR, first, min_element);
      last = size_binop (MINUS_EXPR, last, min_element);
      assert(host_integerp(first, 1) && host_integerp(last, 1) &&
             "Unknown range_expr!");
      FieldOffset = tree_low_cst(first, 1);
      FieldLastOffset = tree_low_cst(last, 1);
    } else if (index) {
      index = size_binop (MINUS_EXPR, fold_convert (sizetype, index),
                          min_element);
      assert(host_integerp(index, 1));
      FieldOffset = tree_low_cst(index, 1);
      FieldLastOffset = FieldOffset;
    } else {
      FieldOffset = NextFieldToFill;
      FieldLastOffset = FieldOffset;
    }
    // Store Val into every slot from FieldOffset through FieldLastOffset
    // inclusive.  The pre-decrement + in-body increment make the "!=" test
    // cover the last slot; this deliberately relies on unsigned wraparound
    // when FieldOffset starts at 0.
    for (--FieldOffset; FieldOffset != FieldLastOffset; ) {
      ++FieldOffset;
      if (FieldOffset == ResultElts.size())
        ResultElts.push_back(Val);
      else {
        if (FieldOffset >= ResultElts.size())
          ResultElts.resize(FieldOffset+1);
        ResultElts[FieldOffset] = Val;
      }
      NextFieldToFill = FieldOffset+1;
    }
  }
  // No initializers at all: emit an empty array constant of the given type.
  if (ResultElts.empty())
    return ConstantArray::get(
      cast<ArrayType>(ConvertType(TREE_TYPE(exp))), ResultElts);
  assert(SomeVal && "If we had some initializer, we should have some value!");
  // Zero-fill the unset slots with the first value's type, noting whether
  // all elements ended up with one uniform type.
  const Type *ElTy = SomeVal->getType();
  Constant *Filler = Constant::getNullValue(ElTy);
  bool AllEltsSameType = true;
  for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
    if (ResultElts[i] == 0)
      ResultElts[i] = Filler;
    else if (ResultElts[i]->getType() != ElTy)
      AllEltsSameType = false;
  }
  if (TREE_CODE(InitType) == VECTOR_TYPE) {
    assert(AllEltsSameType && "Vector of heterogeneous element types?");
    return ConstantVector::get(ResultElts);
  }
  if (AllEltsSameType)
    return ConstantArray::get(
      ArrayType::get(ElTy, ResultElts.size()), ResultElts);
  // Heterogeneous element types: fall back to an unpacked constant struct.
  return ConstantStruct::get(Context, ResultElts, false);
}
namespace {
/// ConstantLayoutInfo - Incrementally builds the element list of a struct
/// constant so that each field lands at the byte offset GCC assigned it,
/// inserting zero padding and switching to a packed layout when natural
/// alignment cannot reproduce GCC's offsets.
struct ConstantLayoutInfo {
  const TargetData &TD;
  /// ResultElts - The constants making up the struct, in order.
  std::vector<Constant*> ResultElts;
  /// StructIsPacked - True once natural alignment has been abandoned and
  /// the layout is treated as packed (see ConvertToPacked).
  bool StructIsPacked;
  /// NextFieldByteStart - Byte offset just past the last element added.
  uint64_t NextFieldByteStart;
  /// MaxLLVMFieldAlignment - Largest ABI alignment seen among the fields
  /// added so far (only tracked while unpacked).
  unsigned MaxLLVMFieldAlignment;
  ConstantLayoutInfo(const TargetData &TD) : TD(TD) {
    StructIsPacked = false;
    NextFieldByteStart = 0;
    MaxLLVMFieldAlignment = 1;
  }
  void ConvertToPacked();
  void AddFieldToRecordConstant(Constant *Val, uint64_t GCCFieldOffsetInBits);
  void AddBitFieldToRecordConstant(ConstantInt *Val,
                                   uint64_t GCCFieldOffsetInBits);
  void HandleTailPadding(uint64_t GCCStructBitSize);
};
}
/// ConvertToPacked - Switch the layout to packed.  Walk the existing
/// elements recomputing their offsets as if the struct had natural layout;
/// wherever natural layout implied padding, insert explicit i8 filler so the
/// packed struct occupies the same bytes.
void ConstantLayoutInfo::ConvertToPacked() {
  assert(!StructIsPacked && "Struct is already packed");
  uint64_t EltOffs = 0;
  for (unsigned i = 0, e = ResultElts.size(); i != e; ++i) {
    Constant *Val = ResultElts[i];
    unsigned ValAlign = TD.getABITypeAlignment(Val->getType());
    uint64_t AlignedEltOffs = TargetData::RoundUpAlignment(EltOffs, ValAlign);
    if (AlignedEltOffs == EltOffs) {
      // Element was already naturally aligned here; no implicit padding.
      EltOffs += TD.getTypeAllocSize(Val->getType());
      continue;
    }
    // Natural layout padded before this element: insert the same number of
    // explicit zero bytes ahead of it.
    const Type *PadTy = Type::getInt8Ty(Context);
    if (AlignedEltOffs-EltOffs != 1)
      PadTy = ArrayType::get(PadTy, AlignedEltOffs-EltOffs);
    ResultElts.insert(ResultElts.begin()+i,
                      Constant::getNullValue(PadTy));
    // Account for the newly inserted element; the next iteration revisits
    // Val at its (now explicit) aligned offset.
    EltOffs = AlignedEltOffs;
    ++e;
  }
  // Packed structs impose no alignment of their own.
  MaxLLVMFieldAlignment = 1;
  StructIsPacked = true;
}
/// AddFieldToRecordConstant - Append Val so that it lands exactly at the
/// given GCC bit offset.  If natural alignment would place it too late, the
/// whole layout is converted to packed and the add is retried; if it would
/// land too early, zero filler bytes are appended first.
void ConstantLayoutInfo::
AddFieldToRecordConstant(Constant *Val, uint64_t GCCFieldOffsetInBits) {
  assert(NextFieldByteStart*8 <= GCCFieldOffsetInBits &&
         "Overlapping LLVM fields!");
  // Compute where natural (unpacked) layout would place this field.
  unsigned ValLLVMAlign = 1;
  if (!StructIsPacked) {
    ValLLVMAlign = TD.getABITypeAlignment(Val->getType());
    MaxLLVMFieldAlignment = std::max(MaxLLVMFieldAlignment, ValLLVMAlign);
  }
  uint64_t LLVMNaturalByteOffset
    = TargetData::RoundUpAlignment(NextFieldByteStart, ValLLVMAlign);
  // Natural alignment would overshoot the GCC offset: pack and retry.
  if (LLVMNaturalByteOffset*8 > GCCFieldOffsetInBits) {
    ConvertToPacked();
    assert(NextFieldByteStart*8 <= GCCFieldOffsetInBits &&
           "Packing didn't fix the problem!");
    return AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
  }
  // Field would land before the GCC offset: pad with zero bytes and retry.
  if (LLVMNaturalByteOffset*8 < GCCFieldOffsetInBits) {
    const Type *FillTy = Type::getInt8Ty(Context);
    if (GCCFieldOffsetInBits/8-NextFieldByteStart != 1)
      FillTy = ArrayType::get(FillTy,
                              GCCFieldOffsetInBits/8-NextFieldByteStart);
    ResultElts.push_back(Constant::getNullValue(FillTy));
    NextFieldByteStart = GCCFieldOffsetInBits/8;
    return AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
  }
  // Offsets agree exactly: append the field and advance past it.
  assert(LLVMNaturalByteOffset*8 == GCCFieldOffsetInBits);
  ResultElts.push_back(Val);
  NextFieldByteStart = LLVMNaturalByteOffset;
  NextFieldByteStart += TD.getTypeAllocSize(Val->getType());
}
/// AddBitFieldToRecordConstant - Add the bitfield constant ValC, which starts
/// at bit GCCFieldOffsetInBits, to the record.  Bitfields are emitted as a
/// run of i8 elements: the value may first have to be merged into the last
/// byte already emitted, and the remainder is then appended one byte at a
/// time in memory order, honoring the target's byte endianness.
///
/// NOTE: this relies on the in-place trunc/zext semantics of this LLVM
/// version's APInt (several trunc/zext results below are deliberately
/// discarded because the operation mutates the receiver).
void ConstantLayoutInfo::
AddBitFieldToRecordConstant(ConstantInt *ValC, uint64_t GCCFieldOffsetInBits) {
// Zero-fill whole bytes until we reach the byte the bitfield starts in.
while (GCCFieldOffsetInBits > NextFieldByteStart*8) {
ResultElts.push_back(ConstantInt::get(Type::getInt8Ty(Context), 0));
++NextFieldByteStart;
}
// If the bitfield starts partway through the last emitted byte, split it:
// the leading bits are OR'd into that byte; ValC keeps the rest (or becomes
// null if the whole value fits in the previous byte).
if (GCCFieldOffsetInBits < NextFieldByteStart*8) {
unsigned ValBitSize = ValC->getBitWidth();
assert(!ResultElts.empty() && "Bitfield starts before first element?");
assert(ResultElts.back()->getType()->isIntegerTy(8) &&
isa<ConstantInt>(ResultElts.back()) &&
"Merging bitfield with non-bitfield value?");
assert(NextFieldByteStart*8 - GCCFieldOffsetInBits < 8 &&
"Bitfield overlaps backwards more than one field?");
// Number of this value's bits that land in the previously emitted byte.
unsigned BitsInPreviousField =
unsigned(NextFieldByteStart*8 - GCCFieldOffsetInBits);
assert(BitsInPreviousField != 0 && "Previous field should not be null!");
APInt ValForPrevField(ValC->getValue());
if (BitsInPreviousField >= ValBitSize) {
// The entire value fits within the previous byte.
ValC = 0;
} else if (!BYTES_BIG_ENDIAN) {
// Little endian: the low bits go into the previous byte; shift them out
// of ValC and shrink it to the remaining width.
ValForPrevField.trunc(BitsInPreviousField);
APInt Tmp = ValC->getValue();
Tmp = Tmp.lshr(BitsInPreviousField);
Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
ValC = ConstantInt::get(Context, Tmp);
} else {
// Big endian: the high bits go into the previous byte; keep the low
// remainder in ValC.
ValForPrevField = ValForPrevField.lshr(ValBitSize-BitsInPreviousField);
ValForPrevField.trunc(BitsInPreviousField);
APInt Tmp = ValC->getValue();
Tmp = Tmp.trunc(ValBitSize-BitsInPreviousField);
ValC = ConstantInt::get(Context, Tmp);
}
// Position the carried bits within an 8-bit value: little endian puts them
// at the top of the byte (after the bits already there); big endian shifts
// only when the previous byte has more room than the value occupies.
ValForPrevField.zext(8);
if (!BYTES_BIG_ENDIAN) {
ValForPrevField = ValForPrevField.shl(8-BitsInPreviousField);
} else {
if (BitsInPreviousField > ValBitSize)
ValForPrevField = ValForPrevField.shl(BitsInPreviousField-ValBitSize);
}
// Merge the carried bits into the last emitted byte.
const APInt &LastElt = cast<ConstantInt>(ResultElts.back())->getValue();
ResultElts.back() = ConstantInt::get(Context, ValForPrevField | LastElt);
// Done if the whole value was consumed by the previous byte.
if (ValC == 0) return;
// The remainder now starts on a byte boundary.
GCCFieldOffsetInBits = NextFieldByteStart*8;
}
// Emit the remaining bits one byte at a time, in memory order.
APInt Val = ValC->getValue();
while (1) {
ConstantInt *ValToAppend;
if (Val.getBitWidth() > 8) {
if (!BYTES_BIG_ENDIAN) {
// Little endian: emit the low byte first, then shift it out.
APInt Tmp = Val;
Tmp.trunc(8);
ValToAppend = ConstantInt::get(Context, Tmp);
Val = Val.lshr(8);
} else {
// Big endian: emit the high byte first (trunc at loop end drops it).
APInt Tmp = Val;
Tmp = Tmp.lshr(Tmp.getBitWidth()-8);
Tmp.trunc(8);
ValToAppend = ConstantInt::get(Context, Tmp);
}
} else if (Val.getBitWidth() == 8) {
// Exactly one byte left.
ValToAppend = ConstantInt::get(Context, Val);
} else {
// Fewer than 8 bits left: widen to a full byte.  On big-endian targets
// the remaining bits occupy the high end of the byte.
APInt Tmp = Val;
Tmp.zext(8);
if (BYTES_BIG_ENDIAN)
Tmp = Tmp << 8-Val.getBitWidth();
ValToAppend = ConstantInt::get(Context, Tmp);
}
ResultElts.push_back(ValToAppend);
++NextFieldByteStart;
if (Val.getBitWidth() <= 8)
break;
// Drop the 8 bits just emitted: little endian already shifted them out,
// so trunc removes zero high bits; big endian trunc removes the emitted
// high byte.
Val.trunc(Val.getBitWidth()-8);
}
}
void ConstantLayoutInfo::HandleTailPadding(uint64_t GCCStructBitSize) {
uint64_t GCCStructSize = (GCCStructBitSize+7)/8;
uint64_t LLVMNaturalSize =
TargetData::RoundUpAlignment(NextFieldByteStart, MaxLLVMFieldAlignment);
if (NextFieldByteStart <= GCCStructSize && LLVMNaturalSize > GCCStructSize) { assert(!StructIsPacked && "LLVM Struct type overflow!");
ConvertToPacked();
LLVMNaturalSize = NextFieldByteStart;
assert(LLVMNaturalSize <= GCCStructSize &&
"Oversized should be handled by packing");
}
if (LLVMNaturalSize < GCCStructSize) {
const Type *FillTy = Type::getInt8Ty(Context);
if (GCCStructSize - NextFieldByteStart != 1)
FillTy = ArrayType::get(FillTy, GCCStructSize - NextFieldByteStart);
ResultElts.push_back(Constant::getNullValue(FillTy));
NextFieldByteStart = GCCStructSize;
LLVMNaturalSize =
TargetData::RoundUpAlignment(NextFieldByteStart, MaxLLVMFieldAlignment);
if (LLVMNaturalSize > GCCStructSize) {
assert(!StructIsPacked && "LLVM Struct type overflow!");
ConvertToPacked();
}
}
}
/// ConvertRecordCONSTRUCTOR - Convert a CONSTRUCTOR for a struct/class into
/// an LLVM constant struct whose element offsets match GCC's field offsets,
/// using ConstantLayoutInfo to insert padding / pack as required.
Constant *TreeConstantToLLVM::ConvertRecordCONSTRUCTOR(tree exp) {
ConstantLayoutInfo LayoutInfo(getTargetData());
tree NextField = TYPE_FIELDS(TREE_TYPE(exp));
unsigned HOST_WIDE_INT CtorIndex;
tree FieldValue;
tree Field;
FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(exp), CtorIndex, Field, FieldValue){
// If the initializer names no field, take the next FIELD_DECL in
// declaration order, skipping non-FIELD_DECL members.
if (Field == 0) {
Field = NextField;
while (1) {
assert(Field && "Fell off end of record!");
if (TREE_CODE(Field) == FIELD_DECL) break;
Field = TREE_CHAIN(Field);
}
}
uint64_t GCCFieldOffsetInBits = getFieldOffsetInBits(Field);
NextField = TREE_CHAIN(Field);
uint64_t FieldSizeInBits = 0;
uint64_t ValueSizeInBits = 0;
Constant *Val = 0;
ConstantInt *ValC = 0;
// Convert the initializer value, except for bitfields whose type has zero
// precision (Val stays null in that case -- presumably such fields carry
// no data; TODO(review): confirm against the bitfield path below).
if (!DECL_BIT_FIELD_TYPE(Field) || TYPE_PRECISION(TREE_TYPE(Field)))
Val = Convert(FieldValue);
if (DECL_SIZE(Field)) {
FieldSizeInBits = getInt64(DECL_SIZE(Field), true);
// Zero-sized fields contribute nothing to the layout.
if (FieldSizeInBits == 0)
continue;
ValueSizeInBits = Val->getType()->getPrimitiveSizeInBits();
ValC = dyn_cast<ConstantInt>(Val);
// A zero constant can be cheaply resized to exactly the field's width,
// keeping the bitfield path's size bookkeeping consistent.
if (ValC && ValC->isZero()) {
if (ValueSizeInBits != FieldSizeInBits) {
APInt ValAsInt = ValC->getValue();
ValC = ConstantInt::get(Context, ValueSizeInBits < FieldSizeInBits ?
ValAsInt.zext(FieldSizeInBits) :
ValAsInt.trunc(FieldSizeInBits));
ValueSizeInBits = FieldSizeInBits;
Val = ValC;
}
}
}
if (!isBitfield(Field))
// Ordinary field: byte-aligned placement.
LayoutInfo.AddFieldToRecordConstant(Val, GCCFieldOffsetInBits);
else {
// Bitfield: the value must be an integer constant, truncated to the
// declared bit width before being spliced into the byte stream.
assert(ValC);
assert(DECL_SIZE(Field));
assert(ValueSizeInBits >= FieldSizeInBits &&
"disagreement between LLVM and GCC on bitfield size");
if (ValueSizeInBits != FieldSizeInBits) {
APInt ValAsInt = ValC->getValue();
ValC = ConstantInt::get(Context, ValAsInt.trunc(FieldSizeInBits));
}
LayoutInfo.AddBitFieldToRecordConstant(ValC, GCCFieldOffsetInBits);
}
}
// Pad (or pack) out to the struct's full size when it is a known constant.
tree StructTypeSizeTree = TYPE_SIZE(TREE_TYPE(exp));
if (StructTypeSizeTree && TREE_CODE(StructTypeSizeTree) == INTEGER_CST)
LayoutInfo.HandleTailPadding(getInt64(StructTypeSizeTree, true));
return ConstantStruct::get(Context, LayoutInfo.ResultElts,
LayoutInfo.StructIsPacked);
}
/// ConvertUnionCONSTRUCTOR - Convert a CONSTRUCTOR for a union into an LLVM
/// constant struct.  A union initializer supplies exactly one value, placed
/// at offset zero and tail-padded out to the union's full size.
Constant *TreeConstantToLLVM::ConvertUnionCONSTRUCTOR(tree exp) {
  assert(!VEC_empty(constructor_elt, CONSTRUCTOR_ELTS(exp))
         && "Union CONSTRUCTOR has no elements? Zero?");

  VEC(constructor_elt, gc) *Elts = CONSTRUCTOR_ELTS(exp);
  assert(VEC_length(constructor_elt, Elts) == 1
         && "Union CONSTRUCTOR with multiple elements?");

  ConstantLayoutInfo LayoutInfo(getTargetData());

  // Convert the single initializer value.
  Constant *Val = Convert(VEC_index(constructor_elt, Elts, 0)->value);

  // Pick the field being initialized: prefer the first *named* FIELD_DECL,
  // falling back to the first FIELD_DECL of any kind, then to the head of
  // the field list.
  tree Field = TYPE_FIELDS(TREE_TYPE(exp));
  assert(Field && "cannot initialize union with no fields");
  tree FirstFieldDecl = 0;
  tree NamedField = Field;
  while (NamedField && (TREE_CODE(NamedField) != FIELD_DECL ||
                        DECL_NAME(NamedField) == NULL_TREE)) {
    if (!FirstFieldDecl && TREE_CODE(NamedField) == FIELD_DECL)
      FirstFieldDecl = NamedField;
    NamedField = TREE_CHAIN(NamedField);
  }
  if (NamedField)
    Field = NamedField;
  else if (FirstFieldDecl)
    Field = FirstFieldDecl;
  assert(Field && "cannot initialize union with no fields");

  if (!isBitfield(Field)) {
    // Ordinary field: it lives at offset zero of the union.
    LayoutInfo.AddFieldToRecordConstant(Val, 0);
  } else {
    // Bitfield: truncate the value to the declared bit width first.
    ConstantInt *ValC = cast<ConstantInt>(Val);
    uint64_t FieldBits = getInt64(DECL_SIZE(Field), true);
    uint64_t ValueBits = Val->getType()->getPrimitiveSizeInBits();
    assert(ValueBits >= FieldBits &&
           "disagreement between LLVM and GCC on bitfield size");
    if (ValueBits != FieldBits) {
      APInt ValAsInt = ValC->getValue();
      ValC = ConstantInt::get(Context, ValAsInt.trunc(FieldBits));
    }
    LayoutInfo.AddBitFieldToRecordConstant(ValC, 0);
  }

  // Zero-fill out to the union's full size when it is a known constant.
  tree UnionTypeSizeTree = TYPE_SIZE(TREE_TYPE(exp));
  if (UnionTypeSizeTree && TREE_CODE(UnionTypeSizeTree) == INTEGER_CST)
    LayoutInfo.HandleTailPadding(getInt64(UnionTypeSizeTree, true));

  return ConstantStruct::get(Context, LayoutInfo.ResultElts,
                             LayoutInfo.StructIsPacked);
}
/// EmitLV - Emit the constant lvalue (address) of the given tree expression,
/// dispatching on its tree code to the appropriate specialized emitter.
Constant *TreeConstantToLLVM::EmitLV(tree exp) {
  Constant *Addr;
  switch (TREE_CODE(exp)) {
  case FUNCTION_DECL:
  case CONST_DECL:
  case VAR_DECL:
    Addr = EmitLV_Decl(exp);
    break;
  case LABEL_DECL:
    Addr = EmitLV_LABEL_DECL(exp);
    break;
  case COMPLEX_CST:
    Addr = EmitLV_COMPLEX_CST(exp);
    break;
  case STRING_CST:
    Addr = EmitLV_STRING_CST(exp);
    break;
  case COMPONENT_REF:
    Addr = EmitLV_COMPONENT_REF(exp);
    break;
  case ARRAY_RANGE_REF:
  case ARRAY_REF:
    Addr = EmitLV_ARRAY_REF(exp);
    break;
  case INDIRECT_REF:
    // The lvalue is simply the value of the pointer operand.
    Addr = Convert(TREE_OPERAND(exp, 0));
    break;
  case COMPOUND_LITERAL_EXPR:
    // The lvalue of a compound literal is that of its underlying decl.
    Addr = EmitLV(DECL_EXPR_DECL (TREE_OPERAND (exp, 0)));
    break;
  default:
    debug_tree(exp);
    assert(0 && "Unknown constant lvalue to convert!");
    abort();
  }
  // Sanity check: the result must be a pointer to the converted tree type
  // (void-typed expressions are exempt: void has no meaningful pointee).
  assert((VOID_TYPE_P(TREE_TYPE(exp)) ||
          Addr->getType() == ConvertType(TREE_TYPE(exp))->getPointerTo()) &&
         "LValue of constant has wrong type!");
  return Addr;
}
/// EmitLV_Decl - Emit the address of a FUNCTION_DECL, CONST_DECL or VAR_DECL,
/// forcing emission of the underlying global if this reference requires it.
Constant *TreeConstantToLLVM::EmitLV_Decl(tree exp) {
  GlobalValue *Val = cast<GlobalValue>(DECL_LLVM(exp));

  // First use of the declaration: notify the backend that it is referenced
  // and re-fetch the LLVM value in case that changed it.
  if (!TREE_USED(exp)) {
    assemble_external(exp);
    TREE_USED(exp) = 1;
    Val = cast<GlobalValue>(DECL_LLVM(exp));
  }

  if (TREE_CODE(exp) == CONST_DECL || TREE_CODE(exp) == VAR_DECL) {
    // If this is a definition we have only seen as a declaration so far,
    // emit the global now -- except for initializers that are CONSTRUCTORs
    // with no type (the BOGUS_CTOR case).
    if ((DECL_INITIAL(exp) || !TREE_PUBLIC(exp)) && !DECL_EXTERNAL(exp) &&
        Val->isDeclaration() && !BOGUS_CTOR(exp)) {
      emit_global_to_llvm(exp);
      Val = cast<GlobalValue>(DECL_LLVM(exp));
    }
  } else {
    // Function: make sure GCC records the decl and its assembler name as
    // referenced.
    mark_decl_referenced(exp);
    if (tree AsmName = DECL_ASSEMBLER_NAME(exp))
      mark_referenced(AsmName);
  }

  // Cast to the pointer type callers expect; addresses of void-typed decls
  // become i8*.
  const Type *Ty = ConvertType(TREE_TYPE(exp));
  if (Ty->isVoidTy())
    Ty = Type::getInt8Ty(Context);
  return TheFolder->CreateBitCast(Val, Ty->getPointerTo());
}
/// EmitLV_LABEL_DECL - Emit the address of a label.  Only valid while the
/// function containing the label is being compiled.
Constant *TreeConstantToLLVM::EmitLV_LABEL_DECL(tree exp) {
  assert(TheTreeToLLVM &&
         "taking the address of a label while not compiling the function!");

  // A label address is only meaningful inside the function that declares it;
  // verify we are emitting exactly that function.
  if (DECL_CONTEXT(exp)) {
    assert(TREE_CODE(DECL_CONTEXT(exp)) == FUNCTION_DECL &&
           "Address of label in nested function?");
    assert(TheTreeToLLVM->getFUNCTION_DECL() == DECL_CONTEXT(exp) &&
           "Taking the address of a label that isn't in the current fn!?");
  }

  // Delegate to the function emitter, which knows the label's block.
  return TheTreeToLLVM->EmitLV_LABEL_DECL(exp);
}
/// EmitLV_COMPLEX_CST - Emit the address of a complex constant, creating a
/// private constant global to hold it.  Identical constants share one global.
Constant *TreeConstantToLLVM::EmitLV_COMPLEX_CST(tree exp) {
  Constant *Init = TreeConstantToLLVM::ConvertCOMPLEX_CST(exp);

  // Cache keyed on the converted constant so equal values are uniqued.
  static std::map<Constant*, GlobalVariable*> ComplexCSTCache;
  GlobalVariable *&Entry = ComplexCSTCache[Init];
  if (!Entry)
    Entry = new GlobalVariable(*TheModule, Init->getType(), true,
                               GlobalVariable::PrivateLinkage, Init, ".cpx");
  return Entry;
}
/// EmitLV_STRING_CST - Emit the address of a string constant as a global
/// variable.  Constant (non-writable) strings are uniqued through a cache;
/// writable strings (-fwritable-strings) each get their own global.
Constant *TreeConstantToLLVM::EmitLV_STRING_CST(tree exp) {
  Constant *Init = TreeConstantToLLVM::ConvertSTRING_CST(exp);

  // Writable strings must not be merged or marked constant.
  bool StringIsConstant = !flag_writable_strings;
#ifdef CONFIG_DARWIN_H
  StringIsConstant |= darwin_constant_cfstring_p(exp);
#endif

  GlobalValue::LinkageTypes Linkage =
    StringIsConstant ? GlobalValue::PrivateLinkage
                     : GlobalValue::InternalLinkage;

  // For constant strings, look up (and remember a slot in) the unique-ing
  // cache before creating anything.
  GlobalVariable **CacheSlot = 0;
  if (StringIsConstant) {
    static std::map<Constant*, GlobalVariable*> StringCSTCache;
    GlobalVariable *&Slot = StringCSTCache[Init];
    if (Slot) return Slot;
    CacheSlot = &Slot;
  }

  GlobalVariable *GV = new GlobalVariable(*TheModule, Init->getType(),
                                          StringIsConstant, Linkage, Init,
                                          ".str");
  // get_constant_alignment returns bits; LLVM alignment is in bytes.
  GV->setAlignment(get_constant_alignment(exp) / 8);

  if (CacheSlot)
    *CacheSlot = GV;
  return GV;
}
/// EmitLV_ARRAY_REF - Emit the address of an array element as a constant GEP.
/// Handles both true arrays (lvalue base, leading zero index, lower-bound
/// bias) and pointer-like bases (the pointer's value is the base).
Constant *TreeConstantToLLVM::EmitLV_ARRAY_REF(tree exp) {
  tree Array = TREE_OPERAND(exp, 0);
  tree ArrayTreeType = TREE_TYPE(Array);  // renamed: avoids llvm::ArrayType
  tree Index = TREE_OPERAND(exp, 1);
  tree IndexType = TREE_TYPE(Index);

  assert((TREE_CODE(ArrayTreeType) == ARRAY_TYPE ||
          TREE_CODE(ArrayTreeType) == POINTER_TYPE ||
          TREE_CODE(ArrayTreeType) == REFERENCE_TYPE ||
          TREE_CODE(ArrayTreeType) == BLOCK_POINTER_TYPE) &&
         "Unknown ARRAY_REF!");
  assert(isSequentialCompatible(ArrayTreeType) && "Global with variable size?");

  // Get the base address: the array's own lvalue for ARRAY_TYPE (after
  // biasing the index by any non-zero lower bound), otherwise the converted
  // pointer value.
  Constant *BaseAddr;
  if (TREE_CODE(ArrayTreeType) == ARRAY_TYPE) {
    tree LowerBound = array_ref_low_bound(exp);
    if (!integer_zerop(LowerBound))
      Index = fold(build2(MINUS_EXPR, IndexType, Index, LowerBound));
    BaseAddr = EmitLV(Array);
  } else {
    BaseAddr = Convert(Array);
  }

  // Normalize the index to the target's pointer-sized integer type, sign- or
  // zero-extending per the index type's signedness.
  Constant *IndexVal = Convert(Index);
  const Type *IntPtrTy = getTargetData().getIntPtrType(Context);
  if (IndexVal->getType() != IntPtrTy)
    IndexVal = TheFolder->CreateIntCast(IndexVal, IntPtrTy,
                                        !TYPE_UNSIGNED(IndexType));

  // True arrays need a leading 0 to step through the pointer-to-array.
  std::vector<Value*> GEPIdx;
  if (TREE_CODE(ArrayTreeType) == ARRAY_TYPE)
    GEPIdx.push_back(ConstantInt::get(IntPtrTy, 0));
  GEPIdx.push_back(IndexVal);

  return TheFolder->CreateGetElementPtr(BaseAddr, &GEPIdx[0], GEPIdx.size());
}
/// EmitLV_COMPONENT_REF - Emit the address of a struct/class member as a
/// constant.  For constant field offsets this builds (and constant-folds) a
/// GEP into the converted struct type; for variable offsets it falls back to
/// pointer arithmetic on integers.
Constant *TreeConstantToLLVM::EmitLV_COMPONENT_REF(tree exp) {
Constant *StructAddrLV = EmitLV(TREE_OPERAND(exp, 0));
// Cast the base address to a pointer to the converted struct type (EmitLV
// may return a pointer of a different, e.g. opaque/forward, type).
const Type *StructTy = ConvertType(TREE_TYPE(TREE_OPERAND(exp, 0)));
tree FieldDecl = TREE_OPERAND(exp, 1);
StructAddrLV = TheFolder->CreateBitCast(StructAddrLV,
StructTy->getPointerTo());
const Type *FieldTy = ConvertType(getDeclaredType(FieldDecl));
// Bit offset of the reference within the struct; must end up 0 after we
// account for the element we GEP to (non-zero would mean a bitfield, which
// this constant path does not handle -- see the assert at the end).
unsigned BitStart = getComponentRefOffsetInBits(exp);
Constant *FieldPtr;
const TargetData &TD = getTargetData();
tree field_offset = component_ref_field_offset (exp);
if (TREE_CODE(field_offset) == INTEGER_CST) {
// Constant offset: index directly to the LLVM struct element recorded for
// this FIELD_DECL, then constant-fold the full GEP (base + 0 + index).
unsigned int MemberIndex = GET_LLVM_FIELD_INDEX(FieldDecl);
Constant *Ops[] = {
StructAddrLV,
Constant::getNullValue(Type::getInt32Ty(Context)),
ConstantInt::get(Type::getInt32Ty(Context), MemberIndex)
};
// First build the GEP (indices only) to obtain its result type, then fold
// the complete operand list with target data for maximal simplification.
FieldPtr = TheFolder->CreateGetElementPtr(StructAddrLV, Ops+1, 2);
FieldPtr = ConstantFoldInstOperands(Instruction::GetElementPtr,
FieldPtr->getType(), Ops, 3, &TD);
// Subtract the chosen element's own offset from BitStart so that what
// remains is only the offset *within* the element.
if (MemberIndex) {
const StructLayout *SL = TD.getStructLayout(cast<StructType>(StructTy));
BitStart -= SL->getElementOffset(MemberIndex) * 8;
}
} else {
// Variable offset: compute base + offset as integers and cast back to a
// pointer to the field type.
Constant *Offset = Convert(field_offset);
Constant *Ptr = TheFolder->CreatePtrToInt(StructAddrLV, Offset->getType());
Ptr = TheFolder->CreateAdd(Ptr, Offset);
FieldPtr = TheFolder->CreateIntToPtr(Ptr,
FieldTy->getPointerTo());
}
// Make sure the result has the pointer type callers expect.
if (FieldTy->getPointerTo() != FieldPtr->getType())
FieldPtr = TheFolder->CreateBitCast(FieldPtr, FieldTy->getPointerTo());
assert(BitStart == 0 &&
"It's a bitfield reference or we didn't get to the field!");
return FieldPtr;
}