#include "llvm-internal.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/TypeSymbolTable.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#undef VISIBILITY_HIDDEN
extern "C" {
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
}
#include "llvm-abi.h"
static std::vector<const Type *> LTypes;
typedef DenseMap<const Type *, unsigned> LTypesMapTy;
static LTypesMapTy LTypesMap;
static LLVMContext &Context = getGlobalContext();
#define SET_TYPE_SYMTAB_LLVM(NODE, index) \
(TYPE_CHECK (NODE)->type.symtab.llvm = index)
/// llvm_set_type - Cache Ty as the LLVM type for the GCC tree Tr, recording
/// it in the PCH-visible LTypes table and stashing its 1-based index in the
/// tree's symtab slot.  Returns Ty for convenient chaining.
static const Type * llvm_set_type(tree Tr, const Type *Ty) {
#ifndef NDEBUG
  // Sanity check: when both GCC and LLVM know a fixed size for this type,
  // the two sizes must agree, otherwise codegen would be miscompiled.
  if (TYPE_SIZE(Tr) && Ty->isSized() && isInt64(TYPE_SIZE(Tr), true)) {
    uint64_t LLVMSize = getTargetData().getTypeAllocSizeInBits(Ty);
    if (getInt64(TYPE_SIZE(Tr), true) != LLVMSize) {
      errs() << "GCC: ";
      debug_tree(Tr);
      errs() << "LLVM: ";
      Ty->print(errs());
      errs() << " (" << LLVMSize << " bits)\n";
      errs() << "LLVM type size doesn't match GCC type size!";
      abort();
    }
  }
#endif
  // If Ty was already registered, just point Tr at the existing slot.
  // Note: operator[] creates a zero entry on first lookup; zero means
  // "not registered yet" because stored indices are 1-based.
  unsigned &TypeSlot = LTypesMap[Ty];
  if (TypeSlot) {
    SET_TYPE_SYMTAB_LLVM(Tr, TypeSlot);
    return Ty;
  }
  // First time we see Ty: append it to LTypes and store its 1-based index
  // (index 0 is reserved to mean "no type" in the tree's symtab slot).
  unsigned Index = LTypes.size() + 1;
  LTypes.push_back(Ty);
  SET_TYPE_SYMTAB_LLVM(Tr, Index);
  LTypesMap[Ty] = Index;
  return Ty;
}
#define SET_TYPE_LLVM(NODE, TYPE) (const Type *)llvm_set_type(NODE, TYPE)
/// llvm_get_type - Look up a cached LLVM type by the 1-based index stored in
/// a GCC tree's symtab slot.  Index 0 is the reserved "no type" marker and
/// yields NULL.
extern "C" const Type *llvm_get_type(unsigned Index) {
  if (!Index)
    return NULL;
  const unsigned Slot = Index - 1;
  assert (Slot < LTypes.size() && "Invalid LLVM Type index");
  return LTypes[Slot];
}
#define GET_TYPE_LLVM(NODE) \
(const Type *)llvm_get_type( TYPE_CHECK (NODE)->type.symtab.llvm)
/// llvmEraseLType - Forget the cached entry for Ty: null out its slot in the
/// LTypes vector and drop it from the index map.  The vector entry is cleared
/// rather than erased so that the 1-based indices of all other types remain
/// valid.
static void llvmEraseLType(const Type *Ty) {
  LTypesMapTy::iterator I = LTypesMap.find(Ty);
  if (I != LTypesMap.end()) {
    // Use the iterator's value instead of a second LTypesMap[Ty] lookup.
    LTypes[I->second - 1] = NULL;
    LTypesMap.erase(I);
  }
}
/// readLLVMTypesStringTable - Rebuild the LTypes vector from the special
/// "llvm.pch.types" global written by writeLLVMTypesStringTable when the
/// precompiled header was produced, then delete that global from the module.
void readLLVMTypesStringTable() {
  GlobalValue *V = TheModule->getNamedGlobal("llvm.pch.types");
  if (!V)
    return;
  GlobalVariable *GV = cast<GlobalVariable>(V);
  // The initializer is a struct of string constants, one name per entry.
  ConstantStruct *LTypesNames = cast<ConstantStruct>(GV->getOperand(0));
  for (unsigned i = 0; i < LTypesNames->getNumOperands(); ++i) {
    const Type *Ty = NULL;
    // A non-ConstantArray operand marks a NULL (erased) slot in LTypes.
    if (ConstantArray *CA =
        dyn_cast<ConstantArray>(LTypesNames->getOperand(i))) {
      std::string Str = CA->getAsString();
      Ty = TheModule->getTypeByName(Str);
      assert (Ty != NULL && "Invalid Type in LTypes string table");
    }
    LTypes.push_back(Ty);
  }
  // The table has served its purpose; remove it from the module.
  GV->eraseFromParent();
}
/// writeLLVMTypesStringTable - Serialize the LTypes vector into a constant
/// global named "llvm.pch.types" so a later compilation can rebuild the
/// table with readLLVMTypesStringTable.  Each entry is the module-level name
/// of the type; unnamed types are given a fresh "llvm.fe.ty..." name first.
void writeLLVMTypesStringTable() {
  if (LTypes.empty())
    return;
  std::vector<Constant *> LTypesNames;
  std::map<const Type *, std::string> TypeNameMap;
  // Seed the map with every name already in the module's type symbol table.
  const TypeSymbolTable &ST = TheModule->getTypeSymbolTable();
  for (TypeSymbolTable::const_iterator TI = ST.begin(); TI != ST.end(); ++TI)
    TypeNameMap[TI->second] = TI->first;
  for (std::vector<const Type *>::iterator I = LTypes.begin(),
       E = LTypes.end(); I != E; ++I) {
    const Type *Ty = *I;
    // One lookup instead of three: operator[] default-constructs "" for a
    // type we have not named yet (and for NULL slots, which stay "").
    std::string &TypeName = TypeNameMap[Ty];
    if (Ty && TypeName.empty()) {
      // Invent a stable module-level name so the type can be found again.
      std::string NewName =
        TheModule->getTypeSymbolTable().getUniqueName("llvm.fe.ty");
      TheModule->addTypeName(NewName, Ty);
      TypeName = NewName;
    }
    LTypesNames.push_back(ConstantArray::get(Context, TypeName, false));
  }
  Constant *LTypesNameTable = ConstantStruct::get(Context, LTypesNames, false);
  // The GlobalVariable constructor registers it in TheModule; the local is
  // otherwise unused.
  GlobalVariable *GV = new GlobalVariable(*TheModule,
                                          LTypesNameTable->getType(), true,
                                          GlobalValue::ExternalLinkage,
                                          LTypesNameTable,
                                          "llvm.pch.types");
  (void)GV;
}
/// GetFunctionType - Build a FunctionType from a result type and argument
/// list held in PATypeHolders (which keep abstract types alive while the
/// signature is being assembled).
static FunctionType *GetFunctionType(const PATypeHolder &Res,
                                     std::vector<PATypeHolder> &ArgTys,
                                     bool isVarArg) {
  // FunctionType::get wants plain Type pointers, so unwrap the holders.
  std::vector<const Type*> RawArgTys;
  RawArgTys.reserve(ArgTys.size());
  for (std::vector<PATypeHolder>::iterator I = ArgTys.begin(),
       E = ArgTys.end(); I != E; ++I)
    RawArgTys.push_back(*I);
  return FunctionType::get(Res, RawArgTys, isVarArg);
}
/// isPassedByInvisibleReference - Return true if an argument of this GCC
/// type must be passed as a hidden pointer rather than by value.
bool isPassedByInvisibleReference(tree Type) {
  // error_mark_node carries no usable information; treat as by-value.
  if (Type == error_mark_node)
    return false;
  // GCC marked the type as requiring an address (e.g. nontrivial C++ copy).
  if (TREE_ADDRESSABLE(Type))
    return true;
  // No size, or a size that is not a compile-time integer constant
  // (variable-sized types), also forces the invisible-reference convention.
  if (TYPE_SIZE(Type) == 0)
    return true;
  return TREE_CODE(TYPE_SIZE(Type)) != INTEGER_CST;
}
/// GetTypeName - Build a qualified name for 'type', e.g. "struct.N::Outer::X"
/// where Prefix is something like "struct." or "enum.".  Anonymous scopes
/// get a unique ".<N>" fragment instead.
static std::string GetTypeName(const char *Prefix, tree type) {
  const char *Name = "anon";
  if (TYPE_NAME(type)) {
    // TYPE_NAME is either an IDENTIFIER_NODE directly or a TYPE_DECL whose
    // DECL_NAME holds the identifier.
    if (TREE_CODE(TYPE_NAME(type)) == IDENTIFIER_NODE)
      Name = IDENTIFIER_POINTER(TYPE_NAME(type));
    else if (DECL_NAME(TYPE_NAME(type)))
      Name = IDENTIFIER_POINTER(DECL_NAME(TYPE_NAME(type)));
  }
  // Walk outward through the enclosing scopes, prepending "Scope::".
  std::string ContextStr;
  tree Context = TYPE_CONTEXT(type);
  while (Context) {
    switch (TREE_CODE(Context)) {
    // Reached the top level: stop walking.
    case TRANSLATION_UNIT_DECL: Context = 0; break; case RECORD_TYPE:
    case NAMESPACE_DECL:
      // NOTE: a RECORD_TYPE without TYPE_NAME, or a NAMESPACE_DECL without
      // DECL_NAME, deliberately falls through into the default case below,
      // which uniquifies the name and terminates the walk.
      if (TREE_CODE(Context) == RECORD_TYPE) {
        if (TYPE_NAME(Context)) {
          std::string NameFrag;
          if (TREE_CODE(TYPE_NAME(Context)) == IDENTIFIER_NODE) {
            NameFrag = IDENTIFIER_POINTER(TYPE_NAME(Context));
          } else {
            NameFrag = IDENTIFIER_POINTER(DECL_NAME(TYPE_NAME(Context)));
          }
          ContextStr = NameFrag + "::" + ContextStr;
          Context = TYPE_CONTEXT(Context);
          break;
        }
      } else if (DECL_NAME(Context)
                 ){
        assert(TREE_CODE(DECL_NAME(Context)) == IDENTIFIER_NODE);
        std::string NamespaceName = IDENTIFIER_POINTER(DECL_NAME(Context));
        ContextStr = NamespaceName + "::" + ContextStr;
        Context = DECL_CONTEXT(Context);
        break;
      }
    default: {
      // Anonymous scope: make the whole name unique and stop.
      static unsigned UniqueID = 0;
      ContextStr = "." + utostr(UniqueID++);
      Context = 0; break;
    }
    }
  }
  return Prefix + ContextStr + Name;
}
/// isSequentialCompatible - Return true if the element type of this GCC
/// array/pointer type has a constant size that fits in 64 bits, i.e. it can
/// be modeled faithfully by an LLVM sequential type.
bool isSequentialCompatible(tree_node *type) {
  assert((TREE_CODE(type) == ARRAY_TYPE ||
          TREE_CODE(type) == POINTER_TYPE ||
          TREE_CODE(type) == REFERENCE_TYPE ||
          TREE_CODE(type) == BLOCK_POINTER_TYPE) && "not a sequential type!");
  tree EltSize = TYPE_SIZE(TREE_TYPE(type));
  if (!EltSize)
    return false;
  return isInt64(EltSize, true);
}
/// isBitfield - Return true if this FIELD_DECL must be treated as a
/// bit-field when laying out the containing struct.
bool isBitfield(tree_node *field_decl) {
  // Fields not declared with a bit-field width are never bit-fields.
  tree DeclaredTy = DECL_BIT_FIELD_TYPE(field_decl);
  if (!DeclaredTy)
    return false;
  assert(DECL_FIELD_BIT_OFFSET(field_decl) && "Bitfield with no bit offset!");
  // Starting on a non-byte boundary forces bit-field handling.
  if (TREE_INT_CST_LOW(DECL_FIELD_BIT_OFFSET(field_decl)) & 7)
    return true;
  // Without a sane constant size for the declared type, be conservative.
  if (!TYPE_SIZE(DeclaredTy) || !isInt64(TYPE_SIZE(DeclaredTy), true))
    return true;
  uint64_t DeclaredBits = getInt64(TYPE_SIZE(DeclaredTy), true);
  assert(!(DeclaredBits & 7) && "A type with a non-byte size!");
  assert(DECL_SIZE(field_decl) && "Bitfield with no bit size!");
  // A field narrower than its declared type is a genuine bit-field;
  // a full-width "bit-field" can be laid out like a normal field.
  uint64_t FieldBits = getInt64(DECL_SIZE(field_decl), true);
  return FieldBits < DeclaredBits;
}
/// getDeclaredType - For bit-fields, return the type the field was declared
/// with (before GCC narrowed it); otherwise the field's ordinary type.
tree getDeclaredType(tree_node *field_decl) {
  if (tree BitFieldTy = DECL_BIT_FIELD_TYPE(field_decl))
    return BitFieldTy;
  return TREE_TYPE (field_decl);
}
/// refine_type_to - If old_type was converted to an opaque placeholder LLVM
/// type, resolve that placeholder to the conversion of new_type.
void refine_type_to(tree old_type, tree new_type)
{
  const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(old_type));
  if (!OldTy)
    return; // Nothing cached, or cached type is not opaque.
  const Type *NewTy = ConvertType (new_type);
  const_cast<OpaqueType*>(OldTy)->refineAbstractTypeTo(NewTy);
}
namespace {
  /// TypeRefinementDatabase - Tracks which GCC trees currently map to an
  /// abstract (not yet fully resolved) LLVM type, so that when LLVM refines
  /// such a type the trees can be retargeted to the concrete replacement.
  class TypeRefinementDatabase : public AbstractTypeUser {
    virtual void refineAbstractType(const DerivedType *OldTy,
                                    const Type *NewTy);
    virtual void typeBecameConcrete(const DerivedType *AbsTy);
  public:
    // For each abstract LLVM type, the list of GCC trees whose cached LLVM
    // type it is.
    std::map<const Type*, std::vector<tree> > TypeUsers;
    /// setType - Cache Ty as the LLVM type for 'type'.  If Ty is still
    /// abstract, register this database as an abstract-type user so the
    /// mapping is updated when Ty is refined.
    inline const Type *setType(tree type, const Type *Ty) {
      if (GET_TYPE_LLVM(type))
        RemoveTypeFromTable(type);
      if (Ty->isAbstract()) {
        std::vector<tree> &Users = TypeUsers[Ty];
        if (Users.empty()) Ty->addAbstractTypeUser(this);
        Users.push_back(type);
      }
      return SET_TYPE_LLVM(type, Ty);
    }
    // PCH serialization helpers need direct access to TypeUsers.
    // (Idiomatic "friend void" specifier order; was "void friend".)
    friend void readLLVMTypeUsers();
    friend void writeLLVMTypeUsers();
    void RemoveTypeFromTable(tree type);
    void dump() const;
  };
  /// TypeDB - The unique type-refinement database instance.
  TypeRefinementDatabase TypeDB;
}
/// RemoveTypeFromTable - Disconnect 'type' from the abstract LLVM type it is
/// currently cached as, dropping the AbstractTypeUser registration when the
/// last user of that type goes away.
void TypeRefinementDatabase::RemoveTypeFromTable(tree type) {
  const Type *Ty = GET_TYPE_LLVM(type);
  // Concrete types are never tracked in TypeUsers.
  if (!Ty->isAbstract()) return;
  std::map<const Type*, std::vector<tree> >::iterator I = TypeUsers.find(Ty);
  assert(I != TypeUsers.end() && "Using an abstract type but not in table?");
  bool FoundIt = false;
  for (unsigned i = 0, e = I->second.size(); i != e; ++i)
    if (I->second[i] == type) {
      FoundIt = true;
      // Swap-with-back erase: the order of the user list does not matter.
      std::swap(I->second[i], I->second.back());
      I->second.pop_back();
      break;
    }
  assert(FoundIt && "Using an abstract type but not in table?");
  // If that was the last tree using Ty, stop listening for refinements.
  if (I->second.empty()) {
    TypeUsers.erase(I);
    Ty->removeAbstractTypeUser(this);
  }
}
/// refineAbstractType - LLVM callback: OldTy is being resolved to NewTy.
/// Retarget every GCC tree that cached OldTy so it now maps to NewTy, and
/// drop all bookkeeping for OldTy.
void TypeRefinementDatabase::refineAbstractType(const DerivedType *OldTy,
                                                const Type *NewTy) {
  // A self-refinement of a still-abstract type changes nothing.
  if (OldTy == NewTy && OldTy->isAbstract()) return;
  std::map<const Type*, std::vector<tree> >::iterator I = TypeUsers.find(OldTy);
  assert(I != TypeUsers.end() && "Using an abstract type but not in table?");
  if (!NewTy->isAbstract()) {
    // NewTy is concrete: just repoint the trees; no further tracking needed.
    if (OldTy != NewTy)
      for (unsigned i = 0, e = I->second.size(); i != e; ++i)
        SET_TYPE_LLVM(I->second[i], NewTy);
  } else {
    // NewTy is still abstract: migrate the users to NewTy's slot and make
    // sure we are registered as a user of NewTy as well.
    std::vector<tree> &NewSlot = TypeUsers[NewTy];
    if (NewSlot.empty()) NewTy->addAbstractTypeUser(this);
    for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
      NewSlot.push_back(I->second[i]);
      SET_TYPE_LLVM(I->second[i], NewTy);
    }
  }
  // OldTy is going away: purge it from the PCH type table and our map.
  llvmEraseLType(OldTy);
  TypeUsers.erase(I);
  // Any cached struct layout for OldTy is now stale.
  if (const StructType *STy = dyn_cast<StructType>(OldTy))
    getTargetData().InvalidateStructLayoutInfo(STy);
  OldTy->removeAbstractTypeUser(this);
}
/// typeBecameConcrete - LLVM callback: AbsTy resolved to itself and is now
/// concrete.  The cached tree mappings are already correct, so just stop
/// tracking the type.
void TypeRefinementDatabase::typeBecameConcrete(const DerivedType *AbsTy) {
  assert(TypeUsers.count(AbsTy) && "Not using this type!");
  TypeUsers.erase(AbsTy);
  AbsTy->removeAbstractTypeUser(this);
}
void TypeRefinementDatabase::dump() const {
outs() << "TypeRefinementDatabase\n";
outs().flush();
}
void readLLVMTypeUsers() {
tree ty;
while ((ty = llvm_pop_TypeUsers())) {
const Type *NewTy = GET_TYPE_LLVM(ty);
std::vector<tree> &NewSlot = TypeDB.TypeUsers[NewTy];
if (NewSlot.empty()) NewTy->addAbstractTypeUser(&TypeDB);
NewSlot.push_back(ty);
}
}
void writeLLVMTypeUsers() {
std::map<const Type*, std::vector<tree> >::iterator
I = TypeDB.TypeUsers.begin(),
E = TypeDB.TypeUsers.end();
for (; I != E; ++I)
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
llvm_push_TypeUsers(I->second[i]);
}
/// getFieldOffsetInBits - Return a FIELD_DECL's position within its record:
/// DECL_FIELD_OFFSET (bytes) * 8 + DECL_FIELD_BIT_OFFSET (bits).
static uint64_t getFieldOffsetInBits(tree Field) {
  assert(DECL_FIELD_BIT_OFFSET(Field) != 0 && DECL_FIELD_OFFSET(Field) != 0);
  uint64_t BitPos = getInt64(DECL_FIELD_BIT_OFFSET(Field), true);
  // The byte component may be a variable expression; only fold it in when
  // it is a compile-time integer constant.
  if (TREE_CODE(DECL_FIELD_OFFSET(Field)) == INTEGER_CST)
    BitPos += 8 * getInt64(DECL_FIELD_OFFSET(Field), true);
  return BitPos;
}
/// FindLLVMTypePadding - Append (bit offset, bit size) pairs to Padding for
/// every padding hole in Ty, recursing into contained aggregates.  BitOffset
/// is the bit position of Ty within the outermost type.  'type' is the
/// corresponding GCC tree, used only to skip elements the converter itself
/// inserted as padding; it is 0 for nested elements.
static void FindLLVMTypePadding(const Type *Ty, tree type, uint64_t BitOffset,
                                SmallVector<std::pair<uint64_t,uint64_t>, 16> &Padding) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const TargetData &TD = getTargetData();
    const StructLayout *SL = TD.getStructLayout(STy);
    uint64_t PrevFieldEnd = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Converter-inserted padding elements are holes by construction; they
      // are covered by the PrevFieldEnd gap logic below, so don't recurse.
      if (type && isPaddingElement(type, i))
        continue;
      uint64_t FieldBitOffset = SL->getElementOffset(i)*8;
      FindLLVMTypePadding(STy->getElementType(i), 0,
                          BitOffset+FieldBitOffset, Padding);
      // Record the gap between the previous field's end and this field.
      if (PrevFieldEnd < FieldBitOffset)
        Padding.push_back(std::make_pair(PrevFieldEnd+BitOffset,
                                         FieldBitOffset-PrevFieldEnd));
      PrevFieldEnd =
        FieldBitOffset + TD.getTypeSizeInBits(STy->getElementType(i));
    }
    // Tail padding after the last field.
    // NOTE(review): unlike the inter-field case above, BitOffset is not
    // added to the recorded offset here — confirm this is intentional.
    if (PrevFieldEnd < SL->getSizeInBits())
      Padding.push_back(std::make_pair(PrevFieldEnd,
                                       SL->getSizeInBits()-PrevFieldEnd));
  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    // Arrays have no inter-element padding; just recurse per element.
    uint64_t EltSize = getTargetData().getTypeSizeInBits(ATy->getElementType());
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      FindLLVMTypePadding(ATy->getElementType(), 0, BitOffset+i*EltSize,
                          Padding);
  }
  // Scalar types contain no padding.
}
/// GCCTypeOverlapsWithPadding - Return true if any data of GCC type 'type'
/// would fall inside the bit range [PadStartBits, PadStartBits+PadSizeBits),
/// i.e. inside a padding hole of the converted LLVM type.  PadStartBits may
/// go negative while recursing (the pad lies before the current field).
static bool GCCTypeOverlapsWithPadding(tree type, int PadStartBits,
                                       int PadSizeBits) {
  assert(type != error_mark_node);
  type = TYPE_MAIN_VARIANT(type);
  // Unknown or variable size: conservatively report an overlap.
  if (!TYPE_SIZE(type))
    return true;
  if (!isInt64(TYPE_SIZE(type), true))
    return true;
  // No overlap if the type is empty or the pad lies entirely outside it.
  if (!getInt64(TYPE_SIZE(type), true) ||
      PadStartBits >= (int64_t)getInt64(TYPE_SIZE(type), false) ||
      PadStartBits+PadSizeBits <= 0)
    return false;
  switch (TREE_CODE(type)) {
  default:
    fprintf(stderr, "Unknown type to compare:\n");
    debug_tree(type);
    abort();
  // Scalars occupy their whole extent, so any intersection is an overlap
  // (the extent check above already established intersection).
  case VOID_TYPE:
  case BOOLEAN_TYPE:
  case ENUMERAL_TYPE:
  case INTEGER_TYPE:
  case REAL_TYPE:
  case COMPLEX_TYPE:
  case VECTOR_TYPE:
  case POINTER_TYPE:
  case REFERENCE_TYPE:
  case BLOCK_POINTER_TYPE:
  case OFFSET_TYPE:
    return true;
  case ARRAY_TYPE: {
    // Test each element, shifting the pad start back by its offset.
    unsigned EltSizeBits = TREE_INT_CST_LOW(TYPE_SIZE(TREE_TYPE(type)));
    unsigned NumElts = cast<ArrayType>(ConvertType(type))->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i)
      if (GCCTypeOverlapsWithPadding(TREE_TYPE(type),
                                     PadStartBits- i*EltSizeBits, PadSizeBits))
        return true;
    return false;
  }
  case QUAL_UNION_TYPE:
  case UNION_TYPE: {
    // A transparent union is passed as its first FIELD_DECL member.
    if (TYPE_TRANSPARENT_UNION(type)) {
      tree Field = TYPE_FIELDS(type);
      assert(Field && "Transparent union must have some elements!");
      while (TREE_CODE(Field) != FIELD_DECL) {
        Field = TREE_CHAIN(Field);
        assert(Field && "Transparent union must have some elements!");
      }
      return GCCTypeOverlapsWithPadding(TREE_TYPE(Field),
                                        PadStartBits, PadSizeBits);
    }
    // Every union member starts at offset zero; overlap with any live
    // member counts.
    for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
      if (TREE_CODE(Field) != FIELD_DECL) continue;
      assert(getFieldOffsetInBits(Field) == 0 && "Union with non-zero offset?");
      // Skip QUAL_UNION members whose discriminating qualifier is
      // statically false.
      if (TREE_CODE(type) == QUAL_UNION_TYPE &&
          integer_zerop(DECL_QUALIFIER(Field)))
        continue;
      if (GCCTypeOverlapsWithPadding(TREE_TYPE(Field),
                                     PadStartBits, PadSizeBits))
        return true;
      // A statically-true qualifier shadows all later members.
      if (TREE_CODE(type) == QUAL_UNION_TYPE &&
          integer_onep(DECL_QUALIFIER(Field)))
        break;
    }
    return false;
  }
  case RECORD_TYPE:
    // Check every field at its actual offset within the record.
    for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
      if (TREE_CODE(Field) != FIELD_DECL) continue;
      // Variable field offsets: conservatively report an overlap.
      if (TREE_CODE(DECL_FIELD_OFFSET(Field)) != INTEGER_CST)
        return true;
      uint64_t FieldBitOffset = getFieldOffsetInBits(Field);
      if (GCCTypeOverlapsWithPadding(getDeclaredType(Field),
                                     PadStartBits-FieldBitOffset, PadSizeBits))
        return true;
    }
    return false;
  }
}
/// GCCTypeOverlapsWithLLVMTypePadding - Return true if any GCC field of
/// 'type' lands inside a padding hole of the converted LLVM type Ty.
bool TypeConverter::GCCTypeOverlapsWithLLVMTypePadding(tree type,
                                                       const Type *Ty) {
  // First collect every (bit offset, bit size) hole in the LLVM type, then
  // test the GCC type against each hole in turn.
  SmallVector<std::pair<uint64_t,uint64_t>, 16> Holes;
  FindLLVMTypePadding(Ty, type, 0, Holes);
  for (unsigned i = 0, e = Holes.size(); i != e; ++i) {
    if (GCCTypeOverlapsWithPadding(type, Holes[i].first, Holes[i].second))
      return true;
  }
  return false;
}
/// ConvertType - Return the LLVM type corresponding to the given GCC type,
/// caching the result via SET_TYPE_LLVM / TypeDB.setType.  error_mark_node
/// converts to i32 as an error-recovery measure.
const Type *TypeConverter::ConvertType(tree orig_type) {
  if (orig_type == error_mark_node) return Type::getInt32Ty(Context);
  // All variants of a type share one LLVM type, keyed on the main variant.
  tree type = TYPE_MAIN_VARIANT(orig_type);
  switch (TREE_CODE(type)) {
  default:
    fprintf(stderr, "Unknown type to convert:\n");
    debug_tree(type);
    abort();
  case VOID_TYPE: return SET_TYPE_LLVM(type, Type::getVoidTy(Context));
  case RECORD_TYPE:
  case QUAL_UNION_TYPE:
  case UNION_TYPE: return ConvertRECORD(type, orig_type);
  case BOOLEAN_TYPE: {
    if (const Type *Ty = GET_TYPE_LLVM(type))
      return Ty;
    // A boolean becomes an integer of exactly its GCC size.
    return SET_TYPE_LLVM(type,
                         IntegerType::get(Context, TREE_INT_CST_LOW(TYPE_SIZE(type))));
  }
  case ENUMERAL_TYPE:
    // An incomplete enum gets a named opaque type, refined once complete.
    if (TYPE_SIZE(orig_type) == 0) {
      if (const Type *Ty = GET_TYPE_LLVM(orig_type))
        return Ty;
      const Type *Ty = OpaqueType::get(Context);
      TheModule->addTypeName(GetTypeName("enum.", orig_type), Ty);
      return TypeDB.setType(orig_type, Ty);
    }
    // Complete enums are handled exactly like integers — note the
    // DELIBERATE FALL-THROUGH.  Use orig_type so the cache key matches.
    type = orig_type;
  case INTEGER_TYPE: {
    if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
    // Precision 511 is a placeholder meaning a 512-bit integer.
    int precision = TYPE_PRECISION(type) == 511 ? 512 : TYPE_PRECISION(type);
    return SET_TYPE_LLVM(type, IntegerType::get(Context, precision));
  }
  case REAL_TYPE:
    if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
    switch (TYPE_PRECISION(type)) {
    default:
      fprintf(stderr, "Unknown FP type!\n");
      debug_tree(type);
      abort();
    case 32: return SET_TYPE_LLVM(type, Type::getFloatTy(Context));
    case 64: return SET_TYPE_LLVM(type, Type::getDoubleTy(Context));
    case 80: return SET_TYPE_LLVM(type, Type::getX86_FP80Ty(Context));
    case 128:
      // The representation of a 128-bit float is target-specific.
#ifdef TARGET_POWERPC
      return SET_TYPE_LLVM(type, Type::getPPC_FP128Ty(Context));
#elif defined(TARGET_ZARCH) || defined(TARGET_CPU_sparc) // FIXME: Use some generic define.
      return SET_TYPE_LLVM(type, Type::getFP128Ty(Context));
#else
      // No native 128-bit float: model it as a pair of doubles.
      return SET_TYPE_LLVM(type,
                           StructType::get(Context, Type::getDoubleTy(Context),
                                           Type::getDoubleTy(Context), NULL));
#endif
    }
  case COMPLEX_TYPE: {
    if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
    // A complex value is a {real, imag} pair of the element type.
    const Type *Ty = ConvertType(TREE_TYPE(type));
    assert(!Ty->isAbstract() && "should use TypeDB.setType()");
    return SET_TYPE_LLVM(type, StructType::get(Context, Ty, Ty, NULL));
  }
  case VECTOR_TYPE: {
    if (const Type *Ty = GET_TYPE_LLVM(type)) return Ty;
    const Type *Ty = ConvertType(TREE_TYPE(type));
    assert(!Ty->isAbstract() && "should use TypeDB.setType()");
    Ty = VectorType::get(Ty, TYPE_VECTOR_SUBPARTS(type));
    return SET_TYPE_LLVM(type, Ty);
  }
  case POINTER_TYPE:
  case REFERENCE_TYPE:
  case BLOCK_POINTER_TYPE:
    if (const PointerType *PTy = cast_or_null<PointerType>(GET_TYPE_LLVM(type))){
      // Already converted.  Unless this pointer sits on top of the
      // deferred-resolution stack (and we are not mid-struct), we're done.
      if (PointersToReresolve.empty() || PointersToReresolve.back() != type ||
          ConvertingStruct)
        return PTy;
      assert(PTy->getElementType()->isOpaqueTy() && "Not a deferred ref!");
      // Mark the stack slot as handled, then convert the pointee with
      // ConvertingStruct set so nested pointers are themselves deferred.
      PointersToReresolve.back() = 0;
      ConvertingStruct = true;
      const Type *Actual = ConvertType(TREE_TYPE(type));
      assert(GET_TYPE_LLVM(type) == PTy && "Pointer invalidated!");
      ConvertingStruct = false;
      // i8* stands in for void*.
      if (Actual->isVoidTy())
        Actual = Type::getInt8Ty(Context);
      // Resolve the opaque pointee in place; the refinement machinery
      // updates everything that referenced it.
      const OpaqueType *OT = cast<OpaqueType>(PTy->getElementType());
      const_cast<OpaqueType*>(OT)->refineAbstractTypeTo(Actual);
      return GET_TYPE_LLVM(type);
    } else {
      const Type *Ty;
      if (ConvertingStruct) {
        // While converting a struct, don't recurse into the pointee (it may
        // recursively mention the struct).  Use the cached pointee type if
        // known; otherwise emit opaque* and remember to resolve it later.
        Ty = GET_TYPE_LLVM(TYPE_MAIN_VARIANT(TREE_TYPE(type)));
        if (Ty == 0) {
          PointersToReresolve.push_back(type);
          return TypeDB.setType(type, OpaqueType::get(Context)->getPointerTo());
        }
      } else {
        Ty = ConvertType(TREE_TYPE(type));
      }
      // i8* stands in for void*.
      if (Ty->isVoidTy())
        Ty = Type::getInt8Ty(Context);
      return TypeDB.setType(type, Ty->getPointerTo());
    }
  case METHOD_TYPE:
  case FUNCTION_TYPE: {
    if (const Type *Ty = GET_TYPE_LLVM(type))
      return Ty;
    // Calling convention and attributes are recomputed at call sites;
    // here they are only scratch outputs of ConvertFunctionType.
    CallingConv::ID CallingConv;
    AttrListPtr PAL;
    return TypeDB.setType(type, ConvertFunctionType(type, NULL, NULL,
                                                    CallingConv, PAL));
  }
  case ARRAY_TYPE: {
    if (const Type *Ty = GET_TYPE_LLVM(type))
      return Ty;
    uint64_t ElementSize;
    const Type *ElementTy;
    if (isSequentialCompatible(type)) {
      ElementSize = getInt64(TYPE_SIZE(TREE_TYPE(type)), true);
      ElementTy = ConvertType(TREE_TYPE(type));
    } else {
      // Elements of unknown/variable size: model the array as raw bytes.
      ElementSize = 8;
      ElementTy = Type::getInt8Ty(Context);
    }
    uint64_t NumElements;
    if (!TYPE_SIZE(type)) {
      // Incomplete array (e.g. flexible array member): zero elements.
      NumElements = 0;
    } else if (!isInt64(TYPE_SIZE(type), true)) {
      // Variable-length array: also modeled as zero elements.
      NumElements = 0;
    } else if (integer_zerop(TYPE_SIZE(type))) {
      NumElements = 0;
    } else {
      // Element count = total bit size / element bit size.
      assert(ElementSize
             && "Array of positive size with elements of zero size!");
      NumElements = getInt64(TYPE_SIZE(type), true);
      assert(!(NumElements % ElementSize)
             && "Array size is not a multiple of the element size!");
      NumElements /= ElementSize;
    }
    return TypeDB.setType(type, ArrayType::get(ElementTy, NumElements));
  }
  case OFFSET_TYPE:
    // Pointer-to-member offsets are plain pointer-sized integers.
    switch (getTargetData().getPointerSize()) {
    default: assert(0 && "Unknown pointer size!");
    case 4: return Type::getInt32Ty(Context);
    case 8: return Type::getInt64Ty(Context);
    }
  }
}
namespace {
  /// FunctionTypeConversion - ABI client that records how a function
  /// signature is lowered: it fills in the return type and argument type
  /// list, tracks the calling convention, and notes whether a shadow
  /// ("sret") return argument was inserted.
  class FunctionTypeConversion : public DefaultABIClient {
    PATypeHolder &RetTy;                 // Lowered return type (output).
    std::vector<PATypeHolder> &ArgTypes; // Lowered argument types (output).
    CallingConv::ID &CallingConv;        // Calling convention (output).
    bool isShadowRet;                    // Was an sret argument inserted?
    bool KNRPromotion;                   // Apply K&R argument promotions?
    unsigned Offset;                     // Offset for scalar-in-aggregate returns.
  public:
    FunctionTypeConversion(PATypeHolder &retty, std::vector<PATypeHolder> &AT,
                           CallingConv::ID &CC, bool KNR)
      : RetTy(retty), ArgTypes(AT), CallingConv(CC), KNRPromotion(KNR), Offset(0) {
      // Default until a target hook overrides it.
      CallingConv = CallingConv::C;
      isShadowRet = false;
    }
    CallingConv::ID& getCallingConv(void) { return CallingConv; }
    bool isShadowReturn() const { return isShadowRet; }
    // Result lowered to a single scalar value.
    void HandleScalarResult(const Type *RetTy) {
      this->RetTy = RetTy;
    }
    // Aggregate result returned as a scalar located Offset bytes in.
    void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0) {
      RetTy = ScalarTy;
      this->Offset = Offset;
    }
    // Aggregate result returned directly as an aggregate value.
    void HandleAggregateResultAsAggregate(const Type *AggrTy) {
      RetTy = AggrTy;
    }
    // Result returned through a hidden pointer argument; the function
    // itself returns either that pointer or void.
    void HandleShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
      RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
      ArgTypes.push_back(PtrArgTy);
      isShadowRet = true;
    }
    void HandleAggregateShadowResult(const PointerType *PtrArgTy,
                                     bool RetPtr) {
      HandleShadowResult(PtrArgTy, RetPtr);
    }
    void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {
      HandleShadowResult(PtrArgTy, RetPtr);
    }
    // ABI-mandated padding argument: treated as an anonymous scalar.
    void HandlePad(const llvm::Type *LLVMTy) {
      HandleScalarArgument(LLVMTy, 0, 0);
    }
    void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
                              unsigned RealSize = 0) {
      if (KNRPromotion) {
        // K&R / unprototyped calls: float promotes to double, small
        // integers promote to int.
        if (type == float_type_node)
          LLVMTy = ConvertType(double_type_node);
        else if (LLVMTy->isIntegerTy(16) || LLVMTy->isIntegerTy(8) ||
                 LLVMTy->isIntegerTy(1))
          LLVMTy = Type::getInt32Ty(Context);
      }
      ArgTypes.push_back(LLVMTy);
    }
    // Argument passed by hidden reference: record the pointer type.
    void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type) {
      ArgTypes.push_back(PtrTy);
    }
    // byval argument: passed as a pointer to a caller-side copy.
    void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {
      HandleScalarArgument(LLVMTy->getPointerTo(), type);
    }
    // First-class aggregate argument: passed directly by value.
    void HandleFCAArgument(const llvm::Type *LLVMTy,
                           tree type ATTRIBUTE_UNUSED) {
      ArgTypes.push_back(LLVMTy);
    }
  };
}
/// HandleArgumentExtension - Decide whether an integer value narrower than
/// 'int' must be sign- or zero-extended when passed or returned, per the C
/// integer promotion rules.
static Attributes HandleArgumentExtension(tree ArgTy) {
  // Booleans are always unsigned, so narrow ones get zero-extension.
  if (TREE_CODE(ArgTy) == BOOLEAN_TYPE)
    return TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)) < INT_TYPE_SIZE
           ? Attribute::ZExt : Attribute::None;
  // Only sub-int integer types need an extension attribute.
  if (TREE_CODE(ArgTy) != INTEGER_TYPE ||
      TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)) >= INT_TYPE_SIZE)
    return Attribute::None;
  return TYPE_UNSIGNED(ArgTy) ? Attribute::ZExt : Attribute::SExt;
}
/// ConvertArgListToFnType - Build an LLVM FunctionType from an actual
/// argument list rather than a prototype (used for calls to unprototyped
/// functions, hence the K&R promotions).  Also computes the calling
/// convention and the parameter attribute list.
const FunctionType *TypeConverter::
ConvertArgListToFnType(tree type, tree Args, tree static_chain,
                       CallingConv::ID &CallingConv, AttrListPtr &PAL) {
  tree ReturnType = TREE_TYPE(type);
  std::vector<PATypeHolder> ArgTys;
  PATypeHolder RetTy(Type::getVoidTy(Context));
  // 'true' enables K&R argument promotion in the conversion client.
  FunctionTypeConversion Client(RetTy, ArgTys, CallingConv, true );
  DefaultABI ABIConverter(Client);
#ifdef TARGET_ADJUST_LLVM_CC
  TARGET_ADJUST_LLVM_CC(CallingConv, type);
#endif
  ABIConverter.HandleReturnType(ReturnType, current_function_decl, false);
  SmallVector<AttributeWithIndex, 8> Attrs;
  // Attribute index 0 is the return value (sign/zero extension etc.).
  Attributes RAttributes = HandleArgumentExtension(ReturnType);
#ifdef TARGET_ADJUST_LLVM_RETATTR
  TARGET_ADJUST_LLVM_RETATTR(RAttributes, type);
#endif
  if (RAttributes != Attribute::None)
    Attrs.push_back(AttributeWithIndex::get(0, RAttributes));
  // If HandleReturnType pushed a shadow return argument, tag it 'sret';
  // ArgTys.size() is its 1-based attribute index at this point.
  if (ABIConverter.isShadowReturn())
    Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
                                            Attribute::StructRet));
  std::vector<const Type*> ScalarArgs;
  // A nested-function static chain pointer gets the 'nest' attribute.
  if (static_chain) {
    ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
    Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
                                            Attribute::Nest));
  }
  for (; Args && TREE_TYPE(Args) != void_type_node; Args = TREE_CHAIN(Args)) {
    tree ArgTy = TREE_TYPE(Args);
    Attributes PAttributes = Attribute::None;
    ABIConverter.HandleArgument(ArgTy, ScalarArgs, &PAttributes);
    PAttributes |= HandleArgumentExtension(ArgTy);
    if (PAttributes != Attribute::None)
      Attrs.push_back(AttributeWithIndex::get(ArgTys.size(), PAttributes));
  }
  PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
  // Signatures built from an argument list are never varargs.
  return GetFunctionType(RetTy, ArgTys, false);
}
/// ConvertFunctionType - Build the LLVM FunctionType, calling convention and
/// attribute list for a GCC FUNCTION_TYPE/METHOD_TYPE.  'decl' (optional) is
/// the function declaration, used for builtin detection, ECF flags and
/// per-parameter restrict qualifiers; 'static_chain' adds a 'nest' argument.
const FunctionType *TypeConverter::
ConvertFunctionType(tree type, tree decl, tree static_chain,
                    CallingConv::ID &CallingConv, AttrListPtr &PAL) {
  PATypeHolder RetTy = Type::getVoidTy(Context);
  std::vector<PATypeHolder> ArgTypes;
  bool isVarArg = false;
  // 'false': no K&R promotion for prototyped functions.
  FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv, false);
  DefaultABI ABIConverter(Client);
#ifdef TARGET_ADJUST_LLVM_CC
  TARGET_ADJUST_LLVM_CC(CallingConv, type);
#endif
  ABIConverter.HandleReturnType(TREE_TYPE(type), current_function_decl,
                                decl ? DECL_BUILT_IN(decl) : false);
  SmallVector<AttributeWithIndex, 8> Attrs;
  // Map GCC's ECF_* call flags onto LLVM function attributes.
  Attributes FnAttributes = Attribute::None;
  int flags = flags_from_decl_or_type(decl ? decl : type);
  if (flags & ECF_NORETURN)
    FnAttributes |= Attribute::NoReturn;
  if (flags & ECF_NOTHROW)
    FnAttributes |= Attribute::NoUnwind;
  if (flags & ECF_CONST)
    FnAttributes |= Attribute::ReadNone;
  if (flags & ECF_PURE && !(flags & ECF_CONST))
    FnAttributes |= Attribute::ReadOnly;
  // A shadow return writes through its pointer argument, so the function
  // cannot be readnone/readonly.
  if (ABIConverter.isShadowReturn())
    FnAttributes &= ~(Attribute::ReadNone|Attribute::ReadOnly);
  // A 'const' nested function still reads its static chain: downgrade
  // readnone to readonly.
  if (static_chain && (FnAttributes & Attribute::ReadNone)) {
    FnAttributes &= ~Attribute::ReadNone;
    FnAttributes |= Attribute::ReadOnly;
  }
  // Attribute index 0 is the return value.
  Attributes RAttributes = Attribute::None;
  RAttributes |= HandleArgumentExtension(TREE_TYPE(type));
#ifdef TARGET_ADJUST_LLVM_RETATTR
  TARGET_ADJUST_LLVM_RETATTR(RAttributes, type);
#endif
  // malloc-like functions return a pointer that aliases nothing.
  if (flags & ECF_MALLOC)
    RAttributes |= Attribute::NoAlias;
  if (RAttributes != Attribute::None)
    Attrs.push_back(AttributeWithIndex::get(0, RAttributes));
  // Tag the shadow return argument (pushed by HandleReturnType) with
  // sret+noalias; ArgTypes.size() is its 1-based index here.
  if (ABIConverter.isShadowReturn())
    Attrs.push_back(AttributeWithIndex::get(ArgTypes.size(),
                                            Attribute::StructRet | Attribute::NoAlias));
  std::vector<const Type*> ScalarArgs;
  // A nested-function static chain pointer gets the 'nest' attribute.
  if (static_chain) {
    ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
    Attrs.push_back(AttributeWithIndex::get(ArgTypes.size(),
                                            Attribute::Nest));
  }
  int local_regparam = 0;
  int local_fp_regparam = 0;
#ifdef LLVM_TARGET_ENABLE_REGPARM
  LLVM_TARGET_INIT_REGPARM(local_regparam, local_fp_regparam, type);
#endif // LLVM_TARGET_ENABLE_REGPARM
  bool HasByVal = false;
  tree DeclArgs = (decl) ? DECL_ARGUMENTS(decl) : NULL;
  tree Args = TYPE_ARG_TYPES(type);
  for (; Args && TREE_VALUE(Args) != void_type_node; Args = TREE_CHAIN(Args)){
    tree ArgTy = TREE_VALUE(Args);
    // An argument whose type is still opaque cannot be lowered yet: fall
    // back to a varargs signature (keeping only the shadow/chain argument
    // for non-C conventions) by clearing Args before exiting the loop.
    if (!isPassedByInvisibleReference(ArgTy) &&
        ConvertType(ArgTy)->isOpaqueTy()) {
      if (CallingConv == CallingConv::C)
        ArgTypes.clear();
      else
        ArgTypes.erase(ArgTypes.begin()+1, ArgTypes.end());
      Args = 0;
      break;
    }
    Attributes PAttributes = Attribute::None;
    // One GCC argument may expand to several LLVM arguments; remember the
    // range so the attributes can be applied to each of them.
    unsigned OldSize = ArgTypes.size();
    ABIConverter.HandleArgument(ArgTy, ScalarArgs, &PAttributes);
    PAttributes |= HandleArgumentExtension(ArgTy);
    // 'restrict' pointers become noalias; prefer the declaration's
    // parameter type, which carries the qualifier.
    tree RestrictArgTy = (DeclArgs) ? TREE_TYPE(DeclArgs) : ArgTy;
    if (TREE_CODE(RestrictArgTy) == POINTER_TYPE ||
        TREE_CODE(RestrictArgTy) == REFERENCE_TYPE ||
        TREE_CODE(RestrictArgTy) == BLOCK_POINTER_TYPE) {
      if (TYPE_RESTRICT(RestrictArgTy))
        PAttributes |= Attribute::NoAlias;
    }
#ifdef LLVM_TARGET_ENABLE_REGPARM
    if (INTEGRAL_TYPE_P(ArgTy) || POINTER_TYPE_P(ArgTy) ||
        SCALAR_FLOAT_TYPE_P(ArgTy))
      LLVM_ADJUST_REGPARM_ATTRIBUTE(PAttributes, ArgTy,
                                    TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)),
                                    local_regparam, local_fp_regparam);
#endif // LLVM_TARGET_ENABLE_REGPARM
    if (PAttributes != Attribute::None) {
      HasByVal |= PAttributes & Attribute::ByVal;
      // Apply the attributes to every LLVM argument this GCC argument
      // expanded to (1-based indices OldSize+1 .. ArgTypes.size()).
      for (unsigned i = OldSize + 1; i <= ArgTypes.size(); ++i) {
        Attrs.push_back(AttributeWithIndex::get(i, PAttributes));
      }
    }
    if (DeclArgs)
      DeclArgs = TREE_CHAIN(DeclArgs);
  }
  // byval implies the callee reads (at least) its argument memory.
  if (HasByVal)
    FnAttributes &= ~(Attribute::ReadNone | Attribute::ReadOnly);
  // A NULL terminator means the prototype ended without 'void': varargs.
  isVarArg = (Args == 0);
  assert(RetTy && "Return type not specified!");
  // Function-level attributes live at index ~0.
  if (FnAttributes != Attribute::None)
    Attrs.push_back(AttributeWithIndex::get(~0, FnAttributes));
  PAL = AttrListPtr::get(Attrs.begin(), Attrs.end());
  return GetFunctionType(RetTy, ArgTypes, isVarArg);
}
struct StructTypeConversionInfo {
std::vector<const Type*> Elements;
std::vector<uint64_t> ElementOffsetInBytes;
std::vector<uint64_t> ElementSizeInBytes;
std::vector<bool> PaddingElement; const TargetData &TD;
unsigned GCCStructAlignmentInBytes;
bool Packed; bool AllBitFields; bool LastFieldStartsAtNonByteBoundry;
unsigned ExtraBitsAvailable;
StructTypeConversionInfo(TargetMachine &TM, unsigned GCCAlign, bool P)
: TD(*TM.getTargetData()), GCCStructAlignmentInBytes(GCCAlign),
Packed(P), AllBitFields(true), LastFieldStartsAtNonByteBoundry(false),
ExtraBitsAvailable(0) {}
void lastFieldStartsAtNonByteBoundry(bool value) {
LastFieldStartsAtNonByteBoundry = value;
}
void extraBitsAvailable (unsigned E) {
ExtraBitsAvailable = E;
}
bool isPacked() { return Packed; }
void markAsPacked() {
Packed = true;
}
void allFieldsAreNotBitFields() {
AllBitFields = false;
LastFieldStartsAtNonByteBoundry = false;
}
unsigned getGCCStructAlignmentInBytes() const {
return GCCStructAlignmentInBytes;
}
unsigned getTypeAlignment(const Type *Ty) const {
return Packed ? 1 : TD.getABITypeAlignment(Ty);
}
uint64_t getTypeSize(const Type *Ty) const {
return TD.getTypeAllocSize(Ty);
}
const Type *getLLVMType() const {
return StructType::get(Context, Elements,
Packed || (!Elements.empty() && AllBitFields));
}
uint64_t getAlignmentAsLLVMStruct() const {
if (Packed || AllBitFields) return 1;
unsigned MaxAlign = 1;
for (unsigned i = 0, e = Elements.size(); i != e; ++i)
MaxAlign = std::max(MaxAlign, getTypeAlignment(Elements[i]));
return MaxAlign;
}
uint64_t getSizeAsLLVMStruct() const {
if (Elements.empty()) return 0;
unsigned MaxAlign = getAlignmentAsLLVMStruct();
uint64_t Size = ElementOffsetInBytes.back()+ElementSizeInBytes.back();
return (Size+MaxAlign-1) & ~(MaxAlign-1);
}
void RemoveExtraBytes () {
unsigned NoOfBytesToRemove = ExtraBitsAvailable/8;
if (!Packed && !AllBitFields)
return;
if (NoOfBytesToRemove == 0)
return;
const Type *LastType = Elements.back();
unsigned PadBytes = 0;
if (LastType->isIntegerTy(8))
PadBytes = 1 - NoOfBytesToRemove;
else if (LastType->isIntegerTy(16))
PadBytes = 2 - NoOfBytesToRemove;
else if (LastType->isIntegerTy(32))
PadBytes = 4 - NoOfBytesToRemove;
else if (LastType->isIntegerTy(64))
PadBytes = 8 - NoOfBytesToRemove;
else
return;
assert (PadBytes > 0 && "Unable to remove extra bytes");
const Type *Pad = ArrayType::get(Type::getInt8Ty(Context), PadBytes);
unsigned OriginalSize = ElementSizeInBytes.back();
Elements.pop_back();
Elements.push_back(Pad);
ElementSizeInBytes.pop_back();
ElementSizeInBytes.push_back(OriginalSize - NoOfBytesToRemove);
}
bool ResizeLastElementIfOverlapsWith(uint64_t ByteOffset, tree Field,
const Type *Ty) {
const Type *SavedTy = NULL;
if (!Elements.empty()) {
assert(ElementOffsetInBytes.back() <= ByteOffset &&
"Cannot go backwards in struct");
SavedTy = Elements.back();
if (ElementOffsetInBytes.back()+ElementSizeInBytes.back() > ByteOffset) {
uint64_t PoppedOffset = ElementOffsetInBytes.back();
Elements.pop_back();
ElementOffsetInBytes.pop_back();
ElementSizeInBytes.pop_back();
PaddingElement.pop_back();
uint64_t EndOffset = getNewElementByteOffset(1);
if (EndOffset < PoppedOffset) {
const Type *Pad = Type::getInt8Ty(Context);
if (PoppedOffset != EndOffset + 1)
Pad = ArrayType::get(Pad, PoppedOffset - EndOffset);
addElement(Pad, EndOffset, PoppedOffset - EndOffset);
}
}
}
unsigned ByteAlignment = getTypeAlignment(Ty);
uint64_t NextByteOffset = getNewElementByteOffset(ByteAlignment);
if (NextByteOffset > ByteOffset ||
ByteAlignment > getGCCStructAlignmentInBytes()) {
return false;
}
if (NextByteOffset < ByteOffset) {
uint64_t CurOffset = getNewElementByteOffset(1);
const Type *Pad = Type::getInt8Ty(Context);
if (SavedTy && LastFieldStartsAtNonByteBoundry)
addElement(SavedTy, CurOffset, ByteOffset - CurOffset);
else if (ByteOffset - CurOffset != 1)
Pad = ArrayType::get(Pad, ByteOffset - CurOffset);
addElement(Pad, CurOffset, ByteOffset - CurOffset);
}
return true;
}
void RemoveFieldsAfter(unsigned FieldNo) {
Elements.erase(Elements.begin()+FieldNo, Elements.end());
ElementOffsetInBytes.erase(ElementOffsetInBytes.begin()+FieldNo,
ElementOffsetInBytes.end());
ElementSizeInBytes.erase(ElementSizeInBytes.begin()+FieldNo,
ElementSizeInBytes.end());
PaddingElement.erase(PaddingElement.begin()+FieldNo,
PaddingElement.end());
}
uint64_t getNewElementByteOffset(unsigned ByteAlignment) {
if (Elements.empty()) return 0;
uint64_t LastElementEnd =
ElementOffsetInBytes.back() + ElementSizeInBytes.back();
return (LastElementEnd+ByteAlignment-1) & ~(ByteAlignment-1);
}
void addElement(const Type *Ty, uint64_t Offset, uint64_t Size,
bool ExtraPadding = false) {
Elements.push_back(Ty);
ElementOffsetInBytes.push_back(Offset);
ElementSizeInBytes.push_back(Size);
PaddingElement.push_back(ExtraPadding);
lastFieldStartsAtNonByteBoundry(false);
ExtraBitsAvailable = 0;
}
uint64_t getFieldEndOffsetInBytes(unsigned FieldNo) const {
assert(FieldNo < ElementOffsetInBytes.size() && "Invalid field #!");
return ElementOffsetInBytes[FieldNo]+ElementSizeInBytes[FieldNo];
}
uint64_t getEndUnallocatedByte() const {
if (ElementOffsetInBytes.empty()) return 0;
return getFieldEndOffsetInBytes(ElementOffsetInBytes.size()-1);
}
/// getLLVMFieldFor - Map a GCC field, identified by its bit offset, to the
/// index of the LLVM struct element that contains it.  CurFieldNo is an
/// advancing cursor (assumes callers query fields in nondecreasing offset
/// order -- TODO confirm against ConvertRECORD's iteration).  Zero-sized
/// fields occupy no bytes, so they need the separate scan below.  Returns
/// ~0U when no containing element exists.
unsigned getLLVMFieldFor(uint64_t FieldOffsetInBits, unsigned &CurFieldNo,
                         bool isZeroSizeField) {
  if (!isZeroSizeField) {
    // Skip elements that end at or before the wanted offset; the first one
    // extending past it is the container.
    while (CurFieldNo < ElementOffsetInBytes.size() &&
           getFieldEndOffsetInBytes(CurFieldNo)*8 <= FieldOffsetInBits)
      ++CurFieldNo;
    if (CurFieldNo < ElementOffsetInBytes.size())
      return CurFieldNo;
    // Offset lies beyond every emitted element; tell the caller there is
    // no field to index.
    return ~0U;
  }
  // Zero-size field: the "(ElementSizeInBytes[CurFieldNo] != 0)" term makes
  // the end-offset comparison strict only for non-empty elements, so the
  // scan can stop exactly on a zero-sized element at the target offset.
  while (CurFieldNo < ElementOffsetInBytes.size() &&
         getFieldEndOffsetInBytes(CurFieldNo)*8 <
         FieldOffsetInBits + (ElementSizeInBytes[CurFieldNo] != 0))
    ++CurFieldNo;
  // If the next element is also zero-sized, return the current index but
  // advance the cursor past it (note the post-increment: the returned
  // value is the pre-increment CurFieldNo).
  if (CurFieldNo+1 < ElementOffsetInBytes.size() &&
      ElementSizeInBytes[CurFieldNo+1] == 0) {
    return CurFieldNo++;
  }
  if (CurFieldNo < ElementOffsetInBytes.size() &&
      ElementSizeInBytes[CurFieldNo] == 0) {
    return CurFieldNo;
  }
  assert(0 && "Could not find field!");
  return ~0U;
}
/// addNewBitField - Add a new element wide enough to hold Size bits, with
/// Extra additional bits legally coverable, starting at
/// FirstUnallocatedByte.  Defined out of line below.
void addNewBitField(uint64_t Size, uint64_t Extra,
                    uint64_t FirstUnallocatedByte);
/// dump - Print the accumulated layout (debugging aid).
void dump() const;
};
/// addNewBitField - Add an integer element to hold a bitfield run of Size
/// bits at FirstUnallocatedByte.  Extra is the number of bits beyond Size
/// that the chosen element may legally overlay (bits belonging to later
/// bitfields or trailing padding, computed by the caller).
void StructTypeConversionInfo::addNewBitField(uint64_t Size, uint64_t Extra,
                                              uint64_t FirstUnallocatedByte) {
  const Type *NewFieldTy = 0;
  // Total number of bits we are allowed to cover with one integer element.
  uint64_t XSize = Size + Extra;
  // Try power-of-two integer widths from the largest candidate (capped at
  // 64 bits) downward, keeping only widths that are legal for the target,
  // hold at least Size bits, and whose ABI alignment neither misaligns the
  // element at FirstUnallocatedByte nor exceeds the struct's GCC alignment.
  for (unsigned w = NextPowerOf2(std::min(UINT64_C(64), XSize))/2;
       w >= Size && w >= 8; w /= 2) {
    if (TD.isIllegalInteger(w))
      continue;
    const unsigned a = TD.getABIIntegerTypeAlignment(w);
    if (FirstUnallocatedByte & (a-1) || a > getGCCStructAlignmentInBytes())
      continue;
    NewFieldTy = IntegerType::get(Context, w);
    break;
  }
  // No width passed the alignment tests: fall back to the smallest
  // standard integer that holds Size bits.
  if (!NewFieldTy) {
    if (Size <= 8)
      NewFieldTy = Type::getInt8Ty(Context);
    else if (Size <= 16)
      NewFieldTy = Type::getInt16Ty(Context);
    else if (Size <= 32)
      NewFieldTy = Type::getInt32Ty(Context);
    else {
      assert(Size <= 64 && "Bitfield too large!");
      NewFieldTy = Type::getInt64Ty(Context);
    }
  }
  // If even the fallback type would be misaligned here (or over-align the
  // struct), use a byte array instead, which only needs 1-byte alignment.
  unsigned ByteAlignment = getTypeAlignment(NewFieldTy);
  if (FirstUnallocatedByte & (ByteAlignment-1) ||
      ByteAlignment > getGCCStructAlignmentInBytes()) {
    NewFieldTy = ArrayType::get(Type::getInt8Ty(Context), (Size+7)/8);
  }
  addElement(NewFieldTy, FirstUnallocatedByte, getTypeSize(NewFieldTy));
  // Bits of the chosen element not consumed by this bitfield remain
  // available for packing subsequent bitfields.
  ExtraBitsAvailable = NewFieldTy->getPrimitiveSizeInBits() - Size;
}
void StructTypeConversionInfo::dump() const {
raw_ostream &OS = outs();
OS << "Info has " << Elements.size() << " fields:\n";
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
OS << " Offset = " << ElementOffsetInBytes[i]
<< " Size = " << ElementSizeInBytes[i]
<< " Type = ";
WriteTypeSymbolic(OS, Elements[i], TheModule);
OS << "\n";
}
OS.flush();
}
std::map<tree, StructTypeConversionInfo *> StructTypeInfoMap;
/// isPaddingElement - Return true if the LLVM struct element at the given
/// index of type's converted layout was inserted purely as padding (no
/// corresponding GCC field).  Returns false for types with no recorded
/// layout.
///
/// Fix: use find() instead of operator[].  The original default-inserted a
/// NULL StructTypeConversionInfo* entry into StructTypeInfoMap for every
/// type ever queried, growing the map with useless entries.
bool isPaddingElement(tree type, unsigned index) {
  std::map<tree, StructTypeConversionInfo *>::const_iterator I =
    StructTypeInfoMap.find(type);
  if (I == StructTypeInfoMap.end() || !I->second)
    return false;
  StructTypeConversionInfo *Info = I->second;
  assert ( Info->Elements.size() == Info->PaddingElement.size()
           && "Invalid StructTypeConversionInfo");
  assert ( index < Info->PaddingElement.size()
           && "Invalid PaddingElement index");
  return Info->PaddingElement[index];
}
/// adjustPaddingElement - Called while selecting a union's representative
/// member, as the candidate switches from oldtree to newtree: clear the
/// padding flags recorded for both types, since an element that is padding
/// in one layout may carry data in the other.  No-op if either type has no
/// recorded layout.
///
/// Fix: use find() instead of operator[].  The original default-inserted
/// NULL entries into StructTypeInfoMap on every miss -- including a
/// guaranteed miss for the NULL oldtree passed on SelectUnionMember's
/// first candidate.
void adjustPaddingElement(tree oldtree, tree newtree) {
  std::map<tree, StructTypeConversionInfo *>::iterator
    OldI = StructTypeInfoMap.find(oldtree),
    NewI = StructTypeInfoMap.find(newtree);
  if (OldI == StructTypeInfoMap.end() || !OldI->second ||
      NewI == StructTypeInfoMap.end() || !NewI->second)
    return;
  StructTypeConversionInfo *OldInfo = OldI->second;
  StructTypeConversionInfo *NewInfo = NewI->second;
  for (unsigned i = 0, size = NewInfo->PaddingElement.size(); i != size; ++i)
    NewInfo->PaddingElement[i] = false;
  for (unsigned i = 0, size = OldInfo->PaddingElement.size(); i != size; ++i)
    OldInfo->PaddingElement[i] = false;
}
static DenseMap<std::pair<tree, unsigned int>, tree > BaseTypesMap;
/// FixBaseClassField - Field's record type occupies fewer bits, or is less
/// aligned, than the field's declaration says it uses (seen for C++ base
/// subobjects whose tail padding is reused).  Build -- and memoize in
/// BaseTypesMap -- a copy of the type whose TYPE_SIZE and TYPE_ALIGN match
/// what the field actually occupies.  Returns the original type unchanged
/// when the copy cannot be built (no trailing TYPE_DECL marker found).
///
/// Fixes vs. the original: the inner name-buffer variable no longer shadows
/// the map key 'p', and the unsigned TYPE_ALIGN value is printed with %u
/// rather than %d.
static tree FixBaseClassField(tree Field) {
  tree oldTy = TREE_TYPE(Field);
  // Cache key: the type plus the alignment the field effectively gets.
  std::pair<tree, unsigned int> Key = std::make_pair(oldTy,
                           std::min(DECL_ALIGN(Field), TYPE_ALIGN(oldTy)));
  tree newTy = BaseTypesMap[Key];
  if (!newTy) {
    newTy = copy_node(oldTy);
    // Copy the FIELD_DECL chain so the copy's layout can be edited without
    // disturbing the original type.  Stop at the first TYPE_DECL.
    tree F2 = 0, prevF2 = 0, F;
    for (F = TYPE_FIELDS(oldTy); F; prevF2 = F2, F = TREE_CHAIN(F)) {
      if (TREE_CODE(F) == TYPE_DECL)
        break;
      if (TREE_CODE(F) == FIELD_DECL) {
        F2 = copy_node(F);
        if (prevF2)
          TREE_CHAIN(prevF2) = F2;
        else
          TYPE_FIELDS(newTy) = F2;
        TREE_CHAIN(F2) = 0;
      }
    }
    // Without a trailing TYPE_DECL we cannot safely rebuild the type;
    // memoize and return the original so we never retry.
    if (!F || TREE_CODE(F) != TYPE_DECL) {
      BaseTypesMap[Key] = oldTy;
      return oldTy;
    }
    BaseTypesMap[Key] = newTy;
    // Reverse mapping (alignment key 0) lets RestoreOriginalFields map the
    // copy back to the original.
    BaseTypesMap[std::make_pair(newTy, 0U)] = oldTy;
    llvm_note_type_used(newTy);
    // Shrink the copy to the size/alignment the field actually uses.
    TYPE_SIZE(newTy) = DECL_SIZE(Field);
    TYPE_SIZE_UNIT(newTy) = DECL_SIZE_UNIT(Field);
    if (DECL_ALIGN(Field) < TYPE_ALIGN(newTy))
      TYPE_ALIGN(newTy) = DECL_ALIGN(Field);
    TYPE_MAIN_VARIANT(newTy) = newTy;
    TYPE_STUB_DECL(newTy) = TYPE_STUB_DECL(oldTy);
    if (TYPE_NAME(oldTy)) {
      // Give the copy a distinct name: "<name>.base.<align>".
      const char *BaseName = "anon";
      if (TREE_CODE(TYPE_NAME(oldTy)) == IDENTIFIER_NODE)
        BaseName = IDENTIFIER_POINTER(TYPE_NAME(oldTy));
      else if (DECL_NAME(TYPE_NAME(oldTy)))
        BaseName = IDENTIFIER_POINTER(DECL_NAME(TYPE_NAME(oldTy)));
      // ".base." (6) + at most 10 digits + NUL = 17 <= 20 extra bytes.
      char *q = (char *)xmalloc(strlen(BaseName)+20);
      sprintf(q, "%s.base.%u", BaseName, TYPE_ALIGN(newTy));
      TYPE_NAME(newTy) = get_identifier(q);
      free(q);
    }
  }
  return newTy;
}
/// FixUpFields - Walk a RECORD_TYPE's fields and, for each embedded record
/// field that occupies less space than its full type or is under-aligned
/// relative to it (base subobjects with reused tail padding), substitute
/// the trimmed copy built by FixBaseClassField.  Then round the record's
/// own TYPE_SIZE/TYPE_SIZE_UNIT up to a multiple of its alignment.
/// RestoreOriginalFields undoes the field-type substitutions later.
static void FixUpFields(tree type) {
  if (TREE_CODE(type)!=RECORD_TYPE)
    return;
  for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
    // Candidates: non-bitfield record-typed fields at constant offsets
    // whose constant DECL_SIZE is smaller than their type's TYPE_SIZE, or
    // whose DECL_ALIGN is below their type's TYPE_ALIGN.
    if (TREE_CODE(Field)==FIELD_DECL &&
        !DECL_BIT_FIELD_TYPE(Field) &&
        TREE_CODE(DECL_FIELD_OFFSET(Field))==INTEGER_CST &&
        TREE_CODE(TREE_TYPE(Field))==RECORD_TYPE &&
        TYPE_SIZE(TREE_TYPE(Field)) &&
        DECL_SIZE(Field) &&
        ((TREE_CODE(DECL_SIZE(Field))==INTEGER_CST &&
          TREE_CODE(TYPE_SIZE(TREE_TYPE(Field)))==INTEGER_CST &&
          TREE_INT_CST_LOW(DECL_SIZE(Field)) <
          TREE_INT_CST_LOW(TYPE_SIZE(TREE_TYPE(Field)))) ||
         (DECL_ALIGN(Field) < TYPE_ALIGN(TREE_TYPE(Field))))) {
      tree newType = FixBaseClassField(Field);
      if (newType != TREE_TYPE(Field)) {
        TREE_TYPE(Field) = newType;
        // Mark the swap so RestoreOriginalFields can put the type back.
        DECL_FIELD_BASE_REPLACED(Field) = 1;
      }
    }
  }
  if (TYPE_SIZE (type) && TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST) {
    // TYPE_SIZE = (TYPE_SIZE + align-1) & ~(align-1), built from folded
    // trees: alignm1 = align + (-1); lhs = size + alignm1; rhs = ~alignm1.
    tree size_type = TREE_TYPE(TYPE_SIZE(type));
    tree alignm1 = fold_build2(PLUS_EXPR, size_type,
                               build_int_cst(size_type, TYPE_ALIGN(type)),
                               fold_build1(NEGATE_EXPR, size_type,
                                           build_int_cst(size_type, 1)));
    tree lhs = fold_build2(PLUS_EXPR, size_type, TYPE_SIZE(type), alignm1);
    tree rhs = fold_build1(BIT_NOT_EXPR, size_type, alignm1);
    TYPE_SIZE(type) = fold_build2(BIT_AND_EXPR, size_type, lhs, rhs);
    // Same rounding for the byte-denominated TYPE_SIZE_UNIT.
    size_type = TREE_TYPE(TYPE_SIZE_UNIT(type));
    alignm1 = fold_build2(PLUS_EXPR, size_type,
                          build_int_cst(size_type, TYPE_ALIGN_UNIT(type)),
                          fold_build1(NEGATE_EXPR, size_type,
                                      build_int_cst(size_type, 1)));
    lhs = fold_build2(PLUS_EXPR, size_type, TYPE_SIZE_UNIT(type), alignm1);
    rhs = fold_build1(BIT_NOT_EXPR, size_type, alignm1);
    TYPE_SIZE_UNIT(type) = fold_build2(BIT_AND_EXPR, size_type, lhs, rhs);
  }
}
/// RestoreOriginalFields - Undo FixUpFields: any field whose type was
/// swapped for a trimmed base-class copy (flagged via
/// DECL_FIELD_BASE_REPLACED) gets its original type back from the reverse
/// entry FixBaseClassField stored in BaseTypesMap under alignment key 0.
static void RestoreOriginalFields(tree type) {
  if (TREE_CODE(type) != RECORD_TYPE)
    return;
  for (tree F = TYPE_FIELDS(type); F; F = TREE_CHAIN(F)) {
    if (TREE_CODE(F) != FIELD_DECL || !DECL_FIELD_BASE_REPLACED(F))
      continue;
    tree &Original = BaseTypesMap[std::make_pair(TREE_TYPE(F), 0U)];
    assert(Original);
    TREE_TYPE(F) = Original;
    DECL_FIELD_BASE_REPLACED(F) = 0;
  }
}
/// DecodeStructFields - Examine one GCC field and either fold it into the
/// layout in Info or report that the current layout strategy fails.
/// Returns false when the struct must be re-laid-out as packed (the caller
/// retries with a packed StructTypeConversionInfo); true otherwise.
bool TypeConverter::DecodeStructFields(tree Field,
                                       StructTypeConversionInfo &Info) {
  // Non-fields and fields at variable offsets contribute no element here.
  if (TREE_CODE(Field) != FIELD_DECL ||
      TREE_CODE(DECL_FIELD_OFFSET(Field)) != INTEGER_CST)
    return true;
  if (isBitfield(Field)) {
    if (!Info.isPacked()) {
      // In an unpacked layout these bitfields force a packed retry:
      // unnamed ones, packed ones, and ones whose user-specified type
      // alignment disagrees with the LLVM type's alignment.
      if (!DECL_NAME(Field))
        return false;
      if (DECL_PACKED(Field))
        return false;
      if (TYPE_USER_ALIGN(DECL_BIT_FIELD_TYPE(Field))) {
        const Type *Ty = ConvertType(getDeclaredType(Field));
        if (TYPE_ALIGN(DECL_BIT_FIELD_TYPE(Field)) !=
            8 * Info.getTypeAlignment(Ty))
          return false;
      }
    }
    DecodeStructBitField(Field, Info);
    return true;
  }
  Info.allFieldsAreNotBitFields();
  // Non-bitfields always begin on a byte boundary.
  uint64_t StartOffsetInBits = getFieldOffsetInBits(Field);
  assert((StartOffsetInBits & 7) == 0 && "Non-bit-field has non-byte offset!");
  uint64_t StartOffsetInBytes = StartOffsetInBits/8;
  const Type *Ty = ConvertType(getDeclaredType(Field));
  // Packed field in an unpacked layout: retry packed.
  if (DECL_PACKED(Field) && !Info.isPacked())
    return false;
  // Overlap with the previous element that cannot be resolved by resizing
  // it: retry packed.
  else if (!Info.ResizeLastElementIfOverlapsWith(StartOffsetInBytes, Field, Ty)) {
    return false;
  }
  // User-specified field alignment disagreeing with the LLVM type's
  // alignment cannot be honored unpacked: retry packed.
  else if (TYPE_USER_ALIGN(TREE_TYPE(Field))
           && (unsigned)DECL_ALIGN(Field) != 8 * Info.getTypeAlignment(Ty)
           && !Info.isPacked()) {
    return false;
  } else
    Info.addElement(Ty, StartOffsetInBytes, Info.getTypeSize(Ty));
  return true;
}
/// DecodeStructBitField - Fold the bitfield Field into the layout in Info:
/// merge it into the previous element's spare bits when it fits, otherwise
/// insert padding and a fresh integer element sized by addNewBitField.
void TypeConverter::DecodeStructBitField(tree_node *Field,
                                         StructTypeConversionInfo &Info) {
  unsigned FieldSizeInBits = TREE_INT_CST_LOW(DECL_SIZE(Field));
  // Zero-width bitfields add no storage.
  if (FieldSizeInBits == 0) return;
  uint64_t StartOffsetInBits = getFieldOffsetInBits(Field);
  uint64_t EndBitOffset = FieldSizeInBits+StartOffsetInBits;
  if (!Info.Elements.empty()) {
    // If this bitfield lies entirely within the last emitted element, just
    // account for the bits it consumes and return.
    uint64_t LastFieldBitOffset = Info.ElementOffsetInBytes.back()*8;
    unsigned LastFieldBitSize = Info.ElementSizeInBytes.back()*8;
    assert(LastFieldBitOffset <= StartOffsetInBits &&
           "This bitfield isn't part of the last field!");
    if (EndBitOffset <= LastFieldBitOffset+LastFieldBitSize &&
        LastFieldBitOffset+LastFieldBitSize >= StartOffsetInBits) {
      Info.extraBitsAvailable(Info.getEndUnallocatedByte()*8 - EndBitOffset);
      return;
    }
  }
  // Look ahead at the consecutive fields that follow to learn how many
  // extra bits a wider integer element could legally cover.
  unsigned ExtraSizeInBits = 0;
  tree LastBitField = 0;
  for (tree f = TREE_CHAIN(Field); f; f = TREE_CHAIN(f)) {
    if (TREE_CODE(f) != FIELD_DECL ||
        TREE_CODE(DECL_FIELD_OFFSET(f)) != INTEGER_CST)
      break;
    if (isBitfield(f))
      LastBitField = f;
    else {
      // A non-bitfield follows: the gap up to its start is usable.
      LastBitField = 0;
      ExtraSizeInBits = getFieldOffsetInBits(f) - EndBitOffset;
      break;
    }
  }
  // The run ended with bitfields: usable bits extend to the byte-rounded
  // end of the last one.
  if (LastBitField)
    ExtraSizeInBits = RoundUpToAlignment(getFieldOffsetInBits(LastBitField) +
      TREE_INT_CST_LOW(DECL_SIZE(LastBitField)), 8) - EndBitOffset;
  uint64_t FirstUnallocatedByte = Info.getEndUnallocatedByte();
  uint64_t StartOffsetFromByteBoundry = StartOffsetInBits & 7;
  if (StartOffsetInBits < FirstUnallocatedByte*8) {
    // The bitfield begins inside bytes already allocated.
    uint64_t AvailableBits = FirstUnallocatedByte * 8 - StartOffsetInBits;
    if (StartOffsetFromByteBoundry == 0) {
      // Byte-aligned start: only the overflow bits need a new element.
      unsigned NumBitsToAdd = FieldSizeInBits - AvailableBits;
      Info.addNewBitField(NumBitsToAdd, ExtraSizeInBits, FirstUnallocatedByte);
      return;
    }
    // Mid-byte start: try to replace the previous element with one wide
    // enough to hold both it and this bitfield.
    unsigned prevFieldTypeSizeInBits =
      Info.ElementSizeInBytes[Info.Elements.size() - 1] * 8;
    unsigned NumBitsRequired = prevFieldTypeSizeInBits
      + (FieldSizeInBits - AvailableBits);
    if (NumBitsRequired > 64) {
      // Cannot merge past 64 bits; just place the overflow bits.
      NumBitsRequired = FieldSizeInBits - AvailableBits;
    } else {
      // Drop the previous element and rewind the allocation point to where
      // it began so the merged element replaces it.
      Info.RemoveFieldsAfter(Info.Elements.size() - 1);
      for (unsigned idx = 0; idx < (prevFieldTypeSizeInBits/8); ++idx)
        FirstUnallocatedByte--;
    }
    Info.addNewBitField(NumBitsRequired, ExtraSizeInBits, FirstUnallocatedByte);
    Info.lastFieldStartsAtNonByteBoundry(true);
    return;
  }
  if (StartOffsetInBits > FirstUnallocatedByte*8) {
    // There is a gap before this bitfield: fill whole bytes with padding
    // and fold any sub-byte remainder into the bitfield's own width.
    unsigned PadBytes = 0;
    unsigned PadBits = 0;
    if (StartOffsetFromByteBoundry != 0) {
      PadBits = StartOffsetInBits - (FirstUnallocatedByte*8);
      PadBytes = PadBits/8;
      PadBits = PadBits - PadBytes*8;
    } else
      PadBytes = StartOffsetInBits/8-FirstUnallocatedByte;
    if (PadBytes) {
      const Type *Pad = Type::getInt8Ty(Context);
      if (PadBytes != 1)
        Pad = ArrayType::get(Pad, PadBytes);
      Info.addElement(Pad, FirstUnallocatedByte, PadBytes);
    }
    FirstUnallocatedByte = StartOffsetInBits/8;
    if (StartOffsetFromByteBoundry != 0)
      FieldSizeInBits += PadBits;
  }
  Info.addNewBitField(FieldSizeInBits, ExtraSizeInBits, FirstUnallocatedByte);
}
/// UnionHasOnlyZeroOffsets - Return true if every FIELD_DECL of this union
/// type starts at bit offset zero.
static bool UnionHasOnlyZeroOffsets(tree type) {
  tree F = TYPE_FIELDS(type);
  while (F) {
    if (TREE_CODE(F) == FIELD_DECL && getFieldOffsetInBits(F) != 0)
      return false;
    F = TREE_CHAIN(F);
  }
  return true;
}
/// SelectUnionMember - For a union whose members all live at offset zero,
/// choose the single member whose LLVM type will represent the union: the
/// most-aligned member, with size breaking alignment ties.  Records the
/// chosen member's element (or bitfield) in Info.
void TypeConverter::SelectUnionMember(tree type,
                                      StructTypeConversionInfo &Info) {
  const Type *UnionTy = 0;
  tree GccUnionTy = 0;
  tree UnionField = 0;
  unsigned MaxAlignSize = 0, MaxAlign = 0;
  for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
    if (TREE_CODE(Field) != FIELD_DECL) continue;
    assert(getFieldOffsetInBits(Field) == 0 && "Union with non-zero offset?");
    // QUAL_UNION_TYPE members with a qualifying condition known false can
    // never be the active member; skip them.
    if (TREE_CODE(type) == QUAL_UNION_TYPE &&
        integer_zerop(DECL_QUALIFIER(Field)))
      continue;
    tree TheGccTy = TREE_TYPE(Field);
    // Zero-sized members contribute nothing to the representation.
    if (DECL_SIZE(Field) &&
        TREE_CODE(DECL_SIZE(Field)) == INTEGER_CST &&
        TREE_INT_CST_LOW(DECL_SIZE(Field)) == 0)
      continue;
    const Type *TheTy = ConvertType(TheGccTy);
    unsigned Size = Info.getTypeSize(TheTy);
    unsigned Align = Info.getTypeAlignment(TheTy);
    // The candidate is changing; clear padding flags recorded for either
    // layout, since they are only trustworthy for the chosen one.
    adjustPaddingElement(GccUnionTy, TheGccTy);
    // Keep the most-aligned candidate; on ties, the larger one.
    if (UnionTy == 0 || Align > MaxAlign ||
        (Align == MaxAlign && Size > MaxAlignSize)) {
      UnionTy = TheTy;
      UnionField = Field;
      GccUnionTy = TheGccTy;
      MaxAlignSize = Size;
      MaxAlign = Align;
    }
    // A qualifying condition known true makes this the active member;
    // nothing later can be selected.
    if (TREE_CODE(type) == QUAL_UNION_TYPE &&
        integer_onep(DECL_QUALIFIER(Field)))
      break;
  }
  if (UnionTy) {
    // If the chosen LLVM type is more aligned than GCC says the union is,
    // mark the LLVM struct packed so it is not over-aligned.
    if (8 * Info.getTypeAlignment(UnionTy) > TYPE_ALIGN(type))
      Info.markAsPacked();
    if (isBitfield(UnionField)) {
      unsigned FieldSizeInBits = TREE_INT_CST_LOW(DECL_SIZE(UnionField));
      Info.addNewBitField(FieldSizeInBits, 0, 0);
    } else {
      Info.allFieldsAreNotBitFields();
      Info.addElement(UnionTy, 0, Info.getTypeSize(UnionTy));
    }
  }
}
/// ConvertRECORD - Convert a GCC RECORD_TYPE / UNION_TYPE / QUAL_UNION_TYPE
/// to an LLVM struct type; orig_type supplies the "struct."/"union." name.
/// Incomplete types (no TYPE_SIZE) become named opaque types.  Complete
/// types get a field-by-field layout, retried as a packed struct when the
/// natural layout cannot reproduce GCC's, then padded or trimmed to match
/// GCC's total size.
const Type *TypeConverter::ConvertRECORD(tree type, tree orig_type) {
  // Reuse a previous conversion unless it was an opaque placeholder that
  // can now be completed (the type has since acquired a size).
  if (const Type *Ty = GET_TYPE_LLVM(type)) {
    if (!Ty->isOpaqueTy() || TYPE_SIZE(type) == 0)
      return Ty;
  }
  bool IsStruct = (TREE_CODE(type) == RECORD_TYPE);
  // No TYPE_SIZE: the type is not laid out yet; emit a named opaque type.
  if (TYPE_SIZE(type) == 0) {
    const Type *Ty = OpaqueType::get(Context);
    TheModule->addTypeName(GetTypeName(IsStruct ? "struct." : "union.",
                                       orig_type), Ty);
    return TypeDB.setType(type, Ty);
  }
  // Track nesting so deferred pointer re-resolution (below) only runs once
  // the outermost struct conversion finishes.
  bool OldConvertingStruct = ConvertingStruct;
  ConvertingStruct = true;
  StructTypeConversionInfo *Info =
    new StructTypeConversionInfo(*TheTarget, TYPE_ALIGN(type) / 8,
                                 TYPE_PACKED(type));
  // Temporarily swap in trimmed base-class field types (undone below).
  if (IsStruct)
    FixUpFields(type);
  // Unions whose members all sit at offset zero are represented by one
  // chosen member; anything else is laid out field by field.
  bool HasOnlyZeroOffsets = (!IsStruct && UnionHasOnlyZeroOffsets(type));
  if (HasOnlyZeroOffsets) {
    SelectUnionMember(type, *Info);
  } else {
    bool retryAsPackedStruct = false;
    for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
      if (DecodeStructFields(Field, *Info) == false) {
        retryAsPackedStruct = true;
        break;
      }
    }
    // Natural layout failed; redo everything as a packed struct, which is
    // expected to always succeed.
    if (retryAsPackedStruct) {
      delete Info;
      Info = new StructTypeConversionInfo(*TheTarget, TYPE_ALIGN(type) / 8,
                                          true);
      for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
        if (DecodeStructFields(Field, *Info) == false) {
          assert(0 && "Unable to decode struct fields.");
        }
      }
    }
  }
  // Reconcile the LLVM struct's size with GCC's: trim any overshoot, then
  // pad any shortfall with an i32 array (when the gap and alignment allow)
  // or an i8 array.
  if (TYPE_SIZE(type) && TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST) {
    uint64_t GCCTypeSize = getInt64(TYPE_SIZE_UNIT(type), true);
    uint64_t LLVMStructSize = Info->getSizeAsLLVMStruct();
    if (LLVMStructSize > GCCTypeSize) {
      Info->RemoveExtraBytes();
      LLVMStructSize = Info->getSizeAsLLVMStruct();
    }
    if (LLVMStructSize != GCCTypeSize) {
      assert(LLVMStructSize < GCCTypeSize &&
             "LLVM type size doesn't match GCC type size!");
      uint64_t LLVMLastElementEnd = Info->getNewElementByteOffset(1);
      // NOTE(review): the offset argument 1 looks suspect -- the other
      // padding paths below derive the offset from LLVMLastElementEnd.
      // Confirm intent before changing.
      if (GCCTypeSize-LLVMLastElementEnd == 1)
        Info->addElement(Type::getInt8Ty(Context), 1, 1);
      else {
        if (((GCCTypeSize-LLVMStructSize) % 4) == 0 &&
            (Info->getAlignmentAsLLVMStruct() %
             Info->getTypeAlignment(Type::getInt32Ty(Context))) == 0) {
          unsigned Int32ArraySize = (GCCTypeSize-LLVMStructSize) / 4;
          const Type *PadTy =
            ArrayType::get(Type::getInt32Ty(Context), Int32ArraySize);
          // NOTE(review): the size recorded here is the i32 element count,
          // not the byte size (Int32ArraySize*4) -- verify intentional.
          Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
                           Int32ArraySize, true /*padding*/);
        } else {
          const Type *PadTy = ArrayType::get(Type::getInt8Ty(Context),
                                             GCCTypeSize-LLVMStructSize);
          Info->addElement(PadTy, GCCTypeSize - LLVMLastElementEnd,
                           GCCTypeSize - LLVMLastElementEnd,
                           true /*padding*/);
        }
      }
    }
  } else
    // Variable-sized type: just drop any trailing over-allocation.
    Info->RemoveExtraBytes();
  // Assign each GCC field its index into the final LLVM struct.
  unsigned CurFieldNo = 0;
  for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field))
    if (TREE_CODE(Field) == FIELD_DECL &&
        TREE_CODE(DECL_FIELD_OFFSET(Field)) == INTEGER_CST) {
      if (HasOnlyZeroOffsets) {
        // Single-member union representation: everything is element 0.
        SET_LLVM_FIELD_INDEX(Field, 0);
      } else {
        uint64_t FieldOffsetInBits = getFieldOffsetInBits(Field);
        tree FieldType = getDeclaredType(Field);
        const Type *FieldTy = ConvertType(FieldType);
        if (isBitfield(Field)) {
          // Bitfields are indexed via the aligned element containing them;
          // reset the cursor since bitfield merging may have placed the
          // containing element before the cursor's position.
          unsigned BitAlignment = Info->getTypeAlignment(FieldTy)*8;
          FieldOffsetInBits &= ~(BitAlignment-1ULL);
          CurFieldNo = 0;
          // Zero-width bitfields have no storage to index.
          if (integer_zerop(DECL_SIZE(Field)))
            continue;
        }
        bool isZeroSizeField = FieldTy->isSized() &&
          getTargetData().getTypeSizeInBits(FieldTy) == 0;
        unsigned FieldNo =
          Info->getLLVMFieldFor(FieldOffsetInBits, CurFieldNo, isZeroSizeField);
        SET_LLVM_FIELD_INDEX(Field, FieldNo);
        assert((isBitfield(Field) || FieldNo == ~0U ||
                FieldOffsetInBits == 8*Info->ElementOffsetInBytes[FieldNo]) &&
               "Wrong LLVM field offset!");
      }
    }
  // Put back the field types FixUpFields swapped out.
  if (IsStruct)
    RestoreOriginalFields(type);
  const Type *ResultTy = Info->getLLVMType();
  // Keep the layout info for later isPaddingElement queries.
  StructTypeInfoMap[type] = Info;
  const OpaqueType *OldTy = cast_or_null<OpaqueType>(GET_TYPE_LLVM(type));
  TypeDB.setType(type, ResultTy);
  // If an opaque placeholder existed, refine it so all users of the
  // placeholder now see the concrete struct type.
  if (OldTy)
    const_cast<OpaqueType*>(OldTy)->refineAbstractTypeTo(ResultTy);
  TheModule->addTypeName(GetTypeName(IsStruct ? "struct." : "union.",
                                     orig_type), GET_TYPE_LLVM(type));
  ConvertingStruct = OldConvertingStruct;
  // Once the outermost struct conversion completes, convert the pointer
  // types whose resolution was deferred (presumably to break conversion
  // cycles -- see PointersToReresolve's producers).
  if (!ConvertingStruct) {
    while (!PointersToReresolve.empty()) {
      if (tree PtrTy = PointersToReresolve.back()) {
        ConvertType(PtrTy);
        assert((PointersToReresolve.empty() ||
                PointersToReresolve.back() != PtrTy) &&
               "Something went wrong with pointer resolution!");
      } else {
        // Null entry: nothing to convert, just pop it.
        PointersToReresolve.pop_back();
      }
    }
  }
  return GET_TYPE_LLVM(type);
}