#define DEBUG_TYPE "sroa"
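/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates (SROA) transformation. It tries to identify promotable elements
/// of an aggregate alloca and promote them to registers, and to convert uses
/// of an element (or set of elements) of an alloca into a vector or
/// bitfield-style integer scalar if appropriate.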
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DIBuilder.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/InstVisitor.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;
STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");
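/// Hidden option to force the pass to not use DomTree and mem2reg, instead
/// forming SSA values through the SSAUpdater infrastructure.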
static cl::opt<bool>
ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
namespace {
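/// \brief A custom IRBuilder inserter which prefixes all names if they are
/// preserved.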
template <bool preserveNames = true>
class IRBuilderPrefixedInserter :
public IRBuilderDefaultInserter<preserveNames> {
std::string Prefix;
public:
void SetNamePrefix(const Twine &P) { Prefix = P.str(); }
protected:
void InsertHelper(Instruction *I, const Twine &Name, BasicBlock *BB,
BasicBlock::iterator InsertPt) const {
IRBuilderDefaultInserter<preserveNames>::InsertHelper(
I, Name.isTriviallyEmpty() ? Name : Prefix + Name, BB, InsertPt);
}
};
template <>
class IRBuilderPrefixedInserter<false> :
public IRBuilderDefaultInserter<false> {
public:
void SetNamePrefix(const Twine &P) {}
};
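/// \brief Provide a typedef for IRBuilder that preserves names in debug
/// builds and drops them (along with the prefixing) in release builds.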
#ifndef NDEBUG
typedef llvm::IRBuilder<true, ConstantFolder,
IRBuilderPrefixedInserter<true> > IRBuilderTy;
#else
typedef llvm::IRBuilder<false, ConstantFolder,
IRBuilderPrefixedInserter<false> > IRBuilderTy;
#endif
}
namespace {
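/// \brief A common base class for representing a half-open byte range.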
struct ByteRange {
uint64_t BeginOffset;
uint64_t EndOffset;
ByteRange() : BeginOffset(), EndOffset() {}
ByteRange(uint64_t BeginOffset, uint64_t EndOffset)
: BeginOffset(BeginOffset), EndOffset(EndOffset) {}
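/// \brief Support for ordering ranges.
///
/// This provides an ordering over ranges such that start offsets are
/// always increasing, and within equal start offsets, the end offsets are
/// decreasing. Thus the spanning range comes first in a cluster with the
/// same start position.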
bool operator<(const ByteRange &RHS) const {
if (BeginOffset < RHS.BeginOffset) return true;
if (BeginOffset > RHS.BeginOffset) return false;
if (EndOffset > RHS.EndOffset) return true;
return false;
}
friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) {
return LHS.BeginOffset < RHSOffset;
}
friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
const ByteRange &RHS) {
return LHSOffset < RHS.BeginOffset;
}
bool operator==(const ByteRange &RHS) const {
return BeginOffset == RHS.BeginOffset && EndOffset == RHS.EndOffset;
}
bool operator!=(const ByteRange &RHS) const { return !operator==(RHS); }
};
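/// \brief A partition of an alloca.
///
/// This structure represents a contiguous partition of the alloca. These are
/// formed by examining the uses of the alloca. During formation, they may
/// overlap, but they are subsequently split and merged into a disjoint
/// sequence. A killed partition (both offsets set to UINT64_MAX) marks an
/// entry to be erased once the list is re-sorted.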
struct Partition : public ByteRange {
bool IsSplittable;
bool isDead() const {
if (BeginOffset == UINT64_MAX) {
assert(EndOffset == UINT64_MAX);
return true;
}
return false;
}
void kill() {
assert(!isDead() && "He's Dead, Jim!");
BeginOffset = EndOffset = UINT64_MAX;
}
Partition() : ByteRange(), IsSplittable() {}
Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable)
: ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {}
};
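/// \brief A particular use of a partition of the alloca.
///
/// This structure associates a use of the alloca with a partition. It marks
/// the range of bytes referenced by a particular instruction and carries a
/// handle to the use itself plus a flag indicating whether the use covers
/// only part of the original memory access (a split use). The bounds are
/// formed by intersecting the bounds of the memory access with a partition.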
class PartitionUse : public ByteRange {
PointerIntPair<Use*, 1, bool> UsePtrAndIsSplit;
public:
PartitionUse() : ByteRange(), UsePtrAndIsSplit() {}
PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U,
bool IsSplit)
: ByteRange(BeginOffset, EndOffset), UsePtrAndIsSplit(U, IsSplit) {}
Use *getUse() const { return UsePtrAndIsSplit.getPointer(); }
void setUse(Use *U) { UsePtrAndIsSplit.setPointer(U); }
bool isSplit() const { return UsePtrAndIsSplit.getInt(); }
};
}
namespace llvm {
template <> struct isPodLike<Partition> : llvm::true_type {};
template <> struct isPodLike<PartitionUse> : llvm::true_type {};
}
namespace {
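/// \brief Alloca partitioning representation.
///
/// This class represents a partitioning of an alloca into slices, and
/// information about the nature of uses of each slice of the alloca. The goal
/// is that this information is sufficient to decide if and how to split the
/// alloca apart and replace slices with scalars. It is also intended that this
/// structure can capture the relevant information needed both to decide about
/// and to enact these transformations.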
class AllocaPartitioning {
public:
AllocaPartitioning(const DataLayout &TD, AllocaInst &AI);
bool isEscaped() const { return PointerEscapingInstr; }
typedef SmallVectorImpl<Partition>::iterator iterator;
iterator begin() { return Partitions.begin(); }
iterator end() { return Partitions.end(); }
typedef SmallVectorImpl<Partition>::const_iterator const_iterator;
const_iterator begin() const { return Partitions.begin(); }
const_iterator end() const { return Partitions.end(); }
typedef SmallVectorImpl<PartitionUse>::iterator use_iterator;
use_iterator use_begin(unsigned Idx) { return Uses[Idx].begin(); }
use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); }
use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); }
use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); }
typedef SmallVectorImpl<PartitionUse>::const_iterator const_use_iterator;
const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); }
const_use_iterator use_begin(const_iterator I) const {
return Uses[I - begin()].begin();
}
const_use_iterator use_end(unsigned Idx) const { return Uses[Idx].end(); }
const_use_iterator use_end(const_iterator I) const {
return Uses[I - begin()].end();
}
unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); }
unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); }
const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const {
return Uses[PIdx][UIdx];
}
const PartitionUse &getUse(const_iterator I, unsigned UIdx) const {
return Uses[I - begin()][UIdx];
}
void use_push_back(unsigned Idx, const PartitionUse &PU) {
Uses[Idx].push_back(PU);
}
void use_push_back(const_iterator I, const PartitionUse &PU) {
Uses[I - begin()].push_back(PU);
}
typedef SmallVectorImpl<Instruction *>::const_iterator dead_user_iterator;
dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); }
dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
typedef SmallVectorImpl<Use *>::const_iterator dead_op_iterator;
dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); }
dead_op_iterator dead_op_end() const { return DeadOperands.end(); }
struct MemTransferOffsets {
uint64_t DestBegin, DestEnd;
uint64_t SourceBegin, SourceEnd;
bool IsSplittable;
};
MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const {
return MemTransferInstData.lookup(&II);
}
iterator findPartitionForPHIOrSelectOperand(Use *U) {
SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
= PHIOrSelectOpMap.find(U);
if (MapIt == PHIOrSelectOpMap.end())
return end();
return begin() + MapIt->second.first;
}
use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) {
SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
= PHIOrSelectOpMap.find(U);
assert(MapIt != PHIOrSelectOpMap.end());
return Uses[MapIt->second.first].begin() + MapIt->second.second;
}
Type *getCommonType(iterator I) const;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
void printUsers(raw_ostream &OS, const_iterator I,
StringRef Indent = " ") const;
void print(raw_ostream &OS) const;
void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
#endif
private:
template <typename DerivedT, typename RetT = void> class BuilderBase;
class PartitionBuilder;
friend class AllocaPartitioning::PartitionBuilder;
class UseBuilder;
friend class AllocaPartitioning::UseBuilder;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
AllocaInst &AI;
#endif
Instruction *PointerEscapingInstr;
SmallVector<Partition, 8> Partitions;
SmallVector<SmallVector<PartitionUse, 2>, 8> Uses;
SmallVector<Instruction *, 8> DeadUsers;
SmallVector<Use *, 8> DeadOperands;
SmallDenseMap<MemTransferInst *, MemTransferOffsets, 4> MemTransferInstData;
SmallDenseMap<Instruction *, std::pair<uint64_t, bool> > PHIOrSelectSizes;
SmallDenseMap<Use *, std::pair<unsigned, unsigned>, 4> PHIOrSelectOpMap;
void splitAndMergePartitions();
};
}
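// If the condition being selected on is a constant or the same value is being
// selected between, fold the select. Yes, this does (rarely) fold selects
// that use the same value for both operands.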
static Value *foldSelectInst(SelectInst &SI) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(SI.getCondition()))
return SI.getOperand(1+CI->isZero());
if (SI.getOperand(1) == SI.getOperand(2))
return SI.getOperand(1);
return 0;
}
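/// \brief Builder for the alloca partitioning.
///
/// This class builds an alloca partitioning by recursively visiting the uses
/// of an alloca and forming a partition for each load, store, or other memory
/// operation at its offset.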
class AllocaPartitioning::PartitionBuilder
: public PtrUseVisitor<PartitionBuilder> {
friend class PtrUseVisitor<PartitionBuilder>;
friend class InstVisitor<PartitionBuilder>;
typedef PtrUseVisitor<PartitionBuilder> Base;
const uint64_t AllocSize;
AllocaPartitioning &P;
SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
public:
PartitionBuilder(const DataLayout &DL, AllocaInst &AI, AllocaPartitioning &P)
: PtrUseVisitor<PartitionBuilder>(DL),
AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())),
P(P) {}
private:
void insertUse(Instruction &I, const APInt &Offset, uint64_t Size,
bool IsSplittable = false) {
if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
<< " which has zero size or starts outside of the "
<< AllocSize << " byte alloca:\n"
<< " alloca: " << P.AI << "\n"
<< " use: " << I << "\n");
return;
}
uint64_t BeginOffset = Offset.getZExtValue();
uint64_t EndOffset = BeginOffset + Size;
assert(AllocSize >= BeginOffset); // Established above.
if (Size > AllocSize - BeginOffset) {
DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
<< " to remain within the " << AllocSize << " byte alloca:\n"
<< " alloca: " << P.AI << "\n"
<< " use: " << I << "\n");
EndOffset = AllocSize;
}
Partition New(BeginOffset, EndOffset, IsSplittable);
P.Partitions.push_back(New);
}
void handleLoadOrStore(Type *Ty, Instruction &I, const APInt &Offset,
uint64_t Size, bool IsVolatile) {
bool IsSplittable =
Ty->isIntegerTy() && !IsVolatile && Offset == 0 && Size >= AllocSize;
insertUse(I, Offset, Size, IsSplittable);
}
void visitLoadInst(LoadInst &LI) {
assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
"All simple FCA loads should have been pre-split");
if (!IsOffsetKnown)
return PI.setAborted(&LI);
uint64_t Size = DL.getTypeStoreSize(LI.getType());
return handleLoadOrStore(LI.getType(), LI, Offset, Size, LI.isVolatile());
}
void visitStoreInst(StoreInst &SI) {
Value *ValOp = SI.getValueOperand();
if (ValOp == *U)
return PI.setEscapedAndAborted(&SI);
if (!IsOffsetKnown)
return PI.setAborted(&SI);
uint64_t Size = DL.getTypeStoreSize(ValOp->getType());
if (Offset.isNegative() || Size > AllocSize ||
Offset.ugt(AllocSize - Size)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
<< " which extends past the end of the " << AllocSize
<< " byte alloca:\n"
<< " alloca: " << P.AI << "\n"
<< " use: " << SI << "\n");
return;
}
assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
"All simple FCA stores should have been pre-split");
handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
}
void visitMemSetInst(MemSetInst &II) {
assert(II.getRawDest() == *U && "Pointer use is not the destination?");
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if ((Length && Length->getValue() == 0) ||
(IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
return;
if (!IsOffsetKnown)
return PI.setAborted(&II);
insertUse(II, Offset,
Length ? Length->getLimitedValue()
: AllocSize - Offset.getLimitedValue(),
(bool)Length);
}
void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if ((Length && Length->getValue() == 0) ||
(IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
return;
if (!IsOffsetKnown)
return PI.setAborted(&II);
uint64_t RawOffset = Offset.getLimitedValue();
uint64_t Size = Length ? Length->getLimitedValue()
: AllocSize - RawOffset;
MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
Offsets.IsSplittable = Length;
if (*U == II.getRawDest()) {
Offsets.DestBegin = RawOffset;
Offsets.DestEnd = RawOffset + Size;
}
if (*U == II.getRawSource()) {
Offsets.SourceBegin = RawOffset;
Offsets.SourceEnd = RawOffset + Size;
}
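// If we have seen both the source and destination of a mem transfer, then
// they both point into this same alloca.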
bool SeenBothEnds = Offsets.SourceEnd && Offsets.DestEnd;
if (SeenBothEnds && II.getRawDest() != II.getRawSource()) {
unsigned PrevIdx = MemTransferPartitionMap[&II];
if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) {
P.Partitions[PrevIdx].kill();
return;
}
P.Partitions[PrevIdx].IsSplittable = Offsets.IsSplittable = false;
} else if (SeenBothEnds) {
assert(II.getRawDest() == II.getRawSource());
if (!II.isVolatile())
return;
Offsets.IsSplittable = false;
}
insertUse(II, Offset, Size, Offsets.IsSplittable);
if (!SeenBothEnds) {
unsigned NewIdx = P.Partitions.size() - 1;
bool Inserted
= MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx)).second;
assert(Inserted &&
"Already have intrinsic in map but haven't seen both ends");
(void)Inserted;
}
}
void visitIntrinsicInst(IntrinsicInst &II) {
if (!IsOffsetKnown)
return PI.setAborted(&II);
if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
II.getIntrinsicID() == Intrinsic::lifetime_end) {
ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
uint64_t Size = std::min(AllocSize - Offset.getLimitedValue(),
Length->getLimitedValue());
insertUse(II, Offset, Size, true);
return;
}
Base::visitIntrinsicInst(II);
}
Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) {
SmallPtrSet<Instruction *, 4> Visited;
SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
Visited.insert(Root);
Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
Size = 0;
do {
Instruction *I, *UsedI;
llvm::tie(UsedI, I) = Uses.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
continue;
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
Value *Op = SI->getOperand(0);
if (Op == UsedI)
return SI;
Size = std::max(Size, DL.getTypeStoreSize(Op->getType()));
continue;
}
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
if (!GEP->hasAllZeroIndices())
return GEP;
} else if (!isa<BitCastInst>(I) && !isa<PHINode>(I) &&
!isa<SelectInst>(I)) {
return I;
}
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
++UI)
if (Visited.insert(cast<Instruction>(*UI)))
Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
} while (!Uses.empty());
return 0;
}
void visitPHINode(PHINode &PN) {
if (PN.use_empty())
return;
if (!IsOffsetKnown)
return PI.setAborted(&PN);
std::pair<uint64_t, bool> &PHIInfo = P.PHIOrSelectSizes[&PN];
if (PHIInfo.first) {
PHIInfo.second = true;
insertUse(PN, Offset, PHIInfo.first);
return;
}
if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first))
return PI.setAborted(UnsafeI);
insertUse(PN, Offset, PHIInfo.first);
}
void visitSelectInst(SelectInst &SI) {
if (SI.use_empty())
return;
if (Value *Result = foldSelectInst(SI)) {
if (Result == *U)
enqueueUsers(SI);
return;
}
if (!IsOffsetKnown)
return PI.setAborted(&SI);
std::pair<uint64_t, bool> &SelectInfo = P.PHIOrSelectSizes[&SI];
if (SelectInfo.first) {
SelectInfo.second = true;
insertUse(SI, Offset, SelectInfo.first);
return;
}
if (Instruction *UnsafeI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first))
return PI.setAborted(UnsafeI);
insertUse(SI, Offset, SelectInfo.first);
}
void visitInstruction(Instruction &I) {
PI.setAborted(&I);
}
};
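/// \brief Use adder for the alloca partitioning.
///
/// This class adds the uses of an alloca to all of the partitions which they
/// use. For splittable partitions, this can end up doing essentially a linear
/// walk of the partitions, but the number of steps remains bounded by the
/// total result instruction size:
/// - The number of partitions is a result of the number of unsplittable
///   instructions using the alloca.
/// - The number of users of each partition is at worst the total number of
///   splittable instructions using the alloca.
/// Thus we will produce N * M uses in the end, where N is the number of
/// unsplittable uses and M the number of splittable ones.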
class AllocaPartitioning::UseBuilder : public PtrUseVisitor<UseBuilder> {
friend class PtrUseVisitor<UseBuilder>;
friend class InstVisitor<UseBuilder>;
typedef PtrUseVisitor<UseBuilder> Base;
const uint64_t AllocSize;
AllocaPartitioning &P;
SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
public:
UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
: PtrUseVisitor<UseBuilder>(TD),
AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
P(P) {}
private:
void markAsDead(Instruction &I) {
if (VisitedDeadInsts.insert(&I))
P.DeadUsers.push_back(&I);
}
void insertUse(Instruction &User, const APInt &Offset, uint64_t Size) {
if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize))
return markAsDead(User);
uint64_t BeginOffset = Offset.getZExtValue();
uint64_t EndOffset = BeginOffset + Size;
assert(AllocSize >= BeginOffset); // Established above.
if (Size > AllocSize - BeginOffset)
EndOffset = AllocSize;
iterator I = std::lower_bound(P.begin(), P.end(), BeginOffset);
if (I != P.begin() && llvm::prior(I)->EndOffset > BeginOffset)
I = llvm::prior(I);
iterator E = P.end();
bool IsSplit = llvm::next(I) != E && llvm::next(I)->BeginOffset < EndOffset;
for (; I != E && I->BeginOffset < EndOffset; ++I) {
PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset),
std::min(I->EndOffset, EndOffset), U, IsSplit);
P.use_push_back(I, NewPU);
if (isa<PHINode>(U->getUser()) || isa<SelectInst>(U->getUser()))
P.PHIOrSelectOpMap[U]
= std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1);
}
}
void visitBitCastInst(BitCastInst &BC) {
if (BC.use_empty())
return markAsDead(BC);
return Base::visitBitCastInst(BC);
}
void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
if (GEPI.use_empty())
return markAsDead(GEPI);
return Base::visitGetElementPtrInst(GEPI);
}
void visitLoadInst(LoadInst &LI) {
assert(IsOffsetKnown);
uint64_t Size = DL.getTypeStoreSize(LI.getType());
insertUse(LI, Offset, Size);
}
void visitStoreInst(StoreInst &SI) {
assert(IsOffsetKnown);
uint64_t Size = DL.getTypeStoreSize(SI.getOperand(0)->getType());
if (Offset.isNegative() || Size > AllocSize ||
Offset.ugt(AllocSize - Size))
return markAsDead(SI);
insertUse(SI, Offset, Size);
}
void visitMemSetInst(MemSetInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if ((Length && Length->getValue() == 0) ||
(IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
return markAsDead(II);
assert(IsOffsetKnown);
insertUse(II, Offset, Length ? Length->getLimitedValue()
: AllocSize - Offset.getLimitedValue());
}
void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
if ((Length && Length->getValue() == 0) ||
(IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
return markAsDead(II);
assert(IsOffsetKnown);
uint64_t Size = Length ? Length->getLimitedValue()
: AllocSize - Offset.getLimitedValue();
MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd &&
Offsets.DestBegin == Offsets.SourceBegin)
return markAsDead(II);
insertUse(II, Offset, Size);
}
void visitIntrinsicInst(IntrinsicInst &II) {
assert(IsOffsetKnown);
assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
II.getIntrinsicID() == Intrinsic::lifetime_end);
ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
insertUse(II, Offset, std::min(Length->getLimitedValue(),
AllocSize - Offset.getLimitedValue()));
}
void insertPHIOrSelect(Instruction &User, const APInt &Offset) {
uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first;
if ((Offset.isNegative() && Offset.uge(Size)) ||
(!Offset.isNegative() && Offset.uge(AllocSize))) {
P.DeadOperands.push_back(U);
return;
}
insertUse(User, Offset, Size);
}
void visitPHINode(PHINode &PN) {
if (PN.use_empty())
return markAsDead(PN);
assert(IsOffsetKnown);
insertPHIOrSelect(PN, Offset);
}
void visitSelectInst(SelectInst &SI) {
if (SI.use_empty())
return markAsDead(SI);
if (Value *Result = foldSelectInst(SI)) {
if (Result == *U)
enqueueUsers(SI);
else
P.DeadOperands.push_back(U);
return;
}
assert(IsOffsetKnown);
insertPHIOrSelect(SI, Offset);
}
void visitInstruction(Instruction &I) {
llvm_unreachable("Unhandled instruction in use builder.");
}
};
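/// \brief Canonicalize the raw, possibly overlapping partition list.
///
/// Overlapping splittable partitions are merged, splittable partitions are
/// split around any unsplittable partitions they overlap, and fully subsumed
/// entries are killed. The list is then re-sorted so that the killed entries
/// (which all compare equal) cluster at the end, where they are erased.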
void AllocaPartitioning::splitAndMergePartitions() {
size_t NumDeadPartitions = 0;
uint64_t SplitEndOffset = 0ull;
Partition New(0ull, 0ull, false);
for (unsigned i = 0, j = i, e = Partitions.size(); i != e; i = j) {
++j;
if (!Partitions[i].IsSplittable || New.BeginOffset == New.EndOffset) {
assert(New.BeginOffset == New.EndOffset);
New = Partitions[i];
} else {
assert(New.IsSplittable);
New.EndOffset = std::max(New.EndOffset, Partitions[i].EndOffset);
}
assert(New.BeginOffset != New.EndOffset);
while (j != e && New.EndOffset > Partitions[j].BeginOffset) {
if (New.IsSplittable && !Partitions[j].IsSplittable)
break;
if (New.IsSplittable == Partitions[j].IsSplittable) {
New.EndOffset = std::max(New.EndOffset, Partitions[j].EndOffset);
} else {
assert(!New.IsSplittable);
assert(Partitions[j].IsSplittable);
SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset);
}
Partitions[j].kill();
++NumDeadPartitions;
++j;
}
if (j != e && New.IsSplittable) {
SplitEndOffset = std::max(SplitEndOffset, New.EndOffset);
New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
}
if (New != Partitions[i]) {
if (New.BeginOffset != New.EndOffset)
Partitions.push_back(New);
Partitions[i].kill();
++NumDeadPartitions;
}
New.BeginOffset = New.EndOffset;
if (!New.IsSplittable) {
New.EndOffset = std::max(New.EndOffset, SplitEndOffset);
if (j != e && !Partitions[j].IsSplittable)
New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset);
New.IsSplittable = true;
if (New.BeginOffset < New.EndOffset &&
(j == e || !Partitions[j].IsSplittable ||
New.EndOffset < Partitions[j].BeginOffset)) {
Partitions.push_back(New);
New.BeginOffset = New.EndOffset = 0ull;
}
}
}
std::sort(Partitions.begin(), Partitions.end());
if (NumDeadPartitions) {
assert(Partitions.back().isDead());
assert((ptrdiff_t)NumDeadPartitions ==
std::count(Partitions.begin(), Partitions.end(), Partitions.back()));
}
Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end());
}
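// Building a partitioning proceeds in two phases: the PartitionBuilder forms
// the raw, possibly overlapping partitions from every use of the alloca, and
// after sorting and canonicalizing them the UseBuilder associates each use
// with the partitions it touches. If the pointer ever escapes, we record the
// offending instruction and build nothing.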
AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
:
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
AI(AI),
#endif
PointerEscapingInstr(0) {
PartitionBuilder PB(TD, AI, *this);
PartitionBuilder::PtrInfo PtrI = PB.visitPtr(AI);
if (PtrI.isEscaped() || PtrI.isAborted()) {
PointerEscapingInstr = PtrI.getEscapingInst() ? PtrI.getEscapingInst()
: PtrI.getAbortingInst();
assert(PointerEscapingInstr && "Did not track a bad instruction");
return;
}
std::sort(Partitions.begin(), Partitions.end());
while (!Partitions.empty() && Partitions.back().isDead())
Partitions.pop_back();
if (Partitions.size() > 1) {
for (iterator I = Partitions.begin(), J = I, E = Partitions.end(); I != E;
I = J) {
++J;
while (J != E && *I == *J) {
I->IsSplittable &= J->IsSplittable;
++J;
}
}
Partitions.erase(std::unique(Partitions.begin(), Partitions.end()),
Partitions.end());
splitAndMergePartitions();
}
Uses.resize(Partitions.size());
UseBuilder UB(TD, AI, *this);
PtrI = UB.visitPtr(AI);
assert(!PtrI.isEscaped() && "Previously analyzed pointer now escapes!");
assert(!PtrI.isAborted() && "Early aborted the visit of the pointer.");
}
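/// \brief Compute a common type among the uses of a particular partition.
///
/// Returns null when the partition's whole-partition loads and stores use
/// more than one distinct non-integer type. Integer types narrow enough to
/// fit in the partition are returned eagerly, as they can be widened to
/// span it.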
Type *AllocaPartitioning::getCommonType(iterator I) const {
Type *Ty = 0;
for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) {
Use *U = UI->getUse();
if (!U)
continue; // Skip dead uses.
if (isa<IntrinsicInst>(*U->getUser()))
continue;
if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset)
continue;
Type *UserTy = 0;
if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser()))
UserTy = LI->getType();
else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser()))
UserTy = SI->getValueOperand()->getType();
else
return 0;
if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
if (ITy->getBitWidth() > (I->EndOffset - I->BeginOffset)*8)
continue;
return ITy;
}
if (Ty && Ty != UserTy)
return 0;
Ty = UserTy;
}
return Ty;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void AllocaPartitioning::print(raw_ostream &OS, const_iterator I,
StringRef Indent) const {
OS << Indent << "partition #" << (I - begin())
<< " [" << I->BeginOffset << "," << I->EndOffset << ")"
<< (I->IsSplittable ? " (splittable)" : "")
<< (Uses[I - begin()].empty() ? " (zero uses)" : "")
<< "\n";
}
void AllocaPartitioning::printUsers(raw_ostream &OS, const_iterator I,
StringRef Indent) const {
for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) {
if (!UI->getUse())
continue; // Skip dead uses.
OS << Indent << " [" << UI->BeginOffset << "," << UI->EndOffset << ") "
<< "used by: " << *UI->getUse()->getUser() << "\n";
if (MemTransferInst *II =
dyn_cast<MemTransferInst>(UI->getUse()->getUser())) {
const MemTransferOffsets &MTO = MemTransferInstData.lookup(II);
bool IsDest;
if (!MTO.IsSplittable)
IsDest = UI->BeginOffset == MTO.DestBegin;
else
IsDest = MTO.DestBegin != 0u;
OS << Indent << " (original " << (IsDest ? "dest" : "source") << ": "
<< "[" << (IsDest ? MTO.DestBegin : MTO.SourceBegin)
<< "," << (IsDest ? MTO.DestEnd : MTO.SourceEnd) << ")\n";
}
}
}
void AllocaPartitioning::print(raw_ostream &OS) const {
if (PointerEscapingInstr) {
OS << "No partitioning for alloca: " << AI << "\n"
<< " A pointer to this alloca escaped by:\n"
<< " " << *PointerEscapingInstr << "\n";
return;
}
OS << "Partitioning of alloca: " << AI << "\n";
for (const_iterator I = begin(), E = end(); I != E; ++I) {
print(OS, I);
printUsers(OS, I);
}
}
void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); }
void AllocaPartitioning::dump() const { print(dbgs()); }
#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
namespace {
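/// \brief An SSA promoter for a single alloca, used on the SSAUpdater path.
///
/// This subclass of LoadAndStorePromoter also collects the dbg.declare and
/// dbg.value intrinsics referencing the alloca so that the debug info can be
/// rewritten (and ultimately erased) as the alloca is promoted.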
class AllocaPromoter : public LoadAndStorePromoter {
AllocaInst &AI;
DIBuilder &DIB;
SmallVector<DbgDeclareInst *, 4> DDIs;
SmallVector<DbgValueInst *, 4> DVIs;
public:
AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
AllocaInst &AI, DIBuilder &DIB)
: LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
void run(const SmallVectorImpl<Instruction*> &Insts) {
if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
for (Value::use_iterator UI = DebugNode->use_begin(),
UE = DebugNode->use_end();
UI != UE; ++UI)
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
DDIs.push_back(DDI);
else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
DVIs.push_back(DVI);
}
LoadAndStorePromoter::run(Insts);
AI.eraseFromParent();
while (!DDIs.empty())
DDIs.pop_back_val()->eraseFromParent();
while (!DVIs.empty())
DVIs.pop_back_val()->eraseFromParent();
}
virtual bool isInstInList(Instruction *I,
const SmallVectorImpl<Instruction*> &Insts) const {
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return LI->getOperand(0) == &AI;
return cast<StoreInst>(I)->getPointerOperand() == &AI;
}
virtual void updateDebugInfo(Instruction *Inst) const {
for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
E = DDIs.end(); I != E; ++I) {
DbgDeclareInst *DDI = *I;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
}
for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
Value *Arg = 0;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
Arg = dyn_cast<Argument>(ZExt->getOperand(0));
if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
Arg = dyn_cast<Argument>(SExt->getOperand(0));
if (!Arg)
Arg = SI->getOperand(0);
} else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
Arg = LI->getOperand(0);
} else {
continue;
}
Instruction *DbgVal =
DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
Inst);
DbgVal->setDebugLoc(DVI->getDebugLoc());
}
}
};
}
namespace {
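/// \brief An optimization pass providing Scalar Replacement of Aggregates.
///
/// This pass takes allocations which can be completely analyzed (that is,
/// they don't escape) and tries to turn them into scalar SSA values. There
/// are a few steps to this process.
///
/// 1) It takes allocations of aggregates and analyzes the ways in which they
///    are used to try to split them into smaller allocations, ideally of
///    a single scalar data type.
/// 2) It will transform accesses into forms which are suitable for SSA value
///    promotion, for example by speculating operations on a PHI or select to
///    be a PHI or select of the results.
/// 3) Finally, it rewrites accesses to target the new allocas and promotes
///    those allocas which can be promoted to SSA values.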
class SROA : public FunctionPass {
const bool RequiresDomTree;
LLVMContext *C;
const DataLayout *TD;
DominatorTree *DT;
SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > Worklist;
SetVector<Instruction *, SmallVector<Instruction *, 8> > DeadInsts;
SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;
std::vector<AllocaInst *> PromotableAllocas;
public:
SROA(bool RequiresDomTree = true)
: FunctionPass(ID), RequiresDomTree(RequiresDomTree),
C(0), TD(0), DT(0) {
initializeSROAPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F);
void getAnalysisUsage(AnalysisUsage &AU) const;
const char *getPassName() const { return "SROA"; }
static char ID;
private:
friend class PHIOrSelectSpeculator;
friend class AllocaPartitionRewriter;
friend class AllocaPartitionVectorRewriter;
bool rewriteAllocaPartition(AllocaInst &AI,
AllocaPartitioning &P,
AllocaPartitioning::iterator PI);
bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P);
bool runOnAlloca(AllocaInst &AI);
void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
bool promoteAllocas(Function &F);
};
}
char SROA::ID = 0;
FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
return new SROA(RequiresDomTree);
}
INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
namespace {
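/// \brief Visitor to speculate PHIs and Selects where possible.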
class PHIOrSelectSpeculator : public InstVisitor<PHIOrSelectSpeculator> {
friend class llvm::InstVisitor<PHIOrSelectSpeculator>;
const DataLayout &TD;
AllocaPartitioning &P;
SROA &Pass;
public:
PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass)
: TD(TD), P(P), Pass(Pass) {}
void visitUsers(AllocaPartitioning::const_iterator PI) {
for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) {
const PartitionUse &PU = P.getUse(PI, Idx);
if (!PU.getUse())
continue;
visit(cast<Instruction>(PU.getUse()->getUser()));
}
}
private:
void visitInstruction(Instruction &I) {}
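/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can only transform this if it is safe to push the loads into the
/// predecessor blocks: all uses must be simple loads in the same block, with
/// nothing that may write to memory between the PHI and the loads, and each
/// incoming value must be safe to load at the predecessor's terminator.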
bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl<LoadInst *> &Loads) {
BasicBlock *BB = PN.getParent();
unsigned MaxAlign = 0;
for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end();
UI != UE; ++UI) {
LoadInst *LI = dyn_cast<LoadInst>(*UI);
if (LI == 0 || !LI->isSimple()) return false;
if (LI->getParent() != BB) return false;
for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
if (BBI->mayWriteToMemory())
return false;
MaxAlign = std::max(MaxAlign, LI->getAlignment());
Loads.push_back(LI);
}
for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
Value *InVal = PN.getIncomingValue(Idx);
if (TI == InVal || TI->mayHaveSideEffects())
return false;
if (TI->getNumSuccessors() == 1)
continue;
if (InVal->isDereferenceablePointer() ||
isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD))
continue;
return false;
}
return true;
}
void visitPHINode(PHINode &PN) {
DEBUG(dbgs() << " original: " << PN << "\n");
SmallVector<LoadInst *, 4> Loads;
if (!isSafePHIToSpeculate(PN, Loads))
return;
assert(!Loads.empty());
Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
IRBuilderTy PHIBuilder(&PN);
PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
PN.getName() + ".sroa.speculated");
LoadInst *SomeLoad = cast<LoadInst>(Loads.back());
MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
unsigned Align = SomeLoad->getAlignment();
do {
LoadInst *LI = Loads.pop_back_val();
LI->replaceAllUsesWith(NewPN);
Pass.DeadInsts.insert(LI);
} while (!Loads.empty());
for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
BasicBlock *Pred = PN.getIncomingBlock(Idx);
TerminatorInst *TI = Pred->getTerminator();
Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx));
Value *InVal = PN.getIncomingValue(Idx);
IRBuilderTy PredBuilder(TI);
LoadInst *Load
= PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." +
Pred->getName()));
++NumLoadsSpeculated;
Load->setAlignment(Align);
if (TBAATag)
Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
NewPN->addIncoming(Load, Pred);
Instruction *Ptr = dyn_cast<Instruction>(InVal);
if (!Ptr)
continue;
AllocaPartitioning::iterator PI
= P.findPartitionForPHIOrSelectOperand(InUse);
if (PI == P.end())
continue;
AllocaPartitioning::use_iterator UI
= P.findPartitionUseForPHIOrSelectOperand(InUse);
assert(isa<PHINode>(*UI->getUse()->getUser()));
UI->setUse(&Load->getOperandUse(Load->getPointerOperandIndex()));
}
DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
}
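/// Select instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers and then select between the result,
/// allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if each operand
/// of the select can be loaded unconditionally.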
bool isSafeSelectToSpeculate(SelectInst &SI,
SmallVectorImpl<LoadInst *> &Loads) {
Value *TValue = SI.getTrueValue();
Value *FValue = SI.getFalseValue();
bool TDerefable = TValue->isDereferenceablePointer();
bool FDerefable = FValue->isDereferenceablePointer();
for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end();
UI != UE; ++UI) {
LoadInst *LI = dyn_cast<LoadInst>(*UI);
if (LI == 0 || !LI->isSimple()) return false;
if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI,
LI->getAlignment(), &TD))
return false;
if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI,
LI->getAlignment(), &TD))
return false;
Loads.push_back(LI);
}
return true;
}
void visitSelectInst(SelectInst &SI) {
DEBUG(dbgs() << " original: " << SI << "\n");
SmallVector<LoadInst *, 4> Loads;
if (!isSafeSelectToSpeculate(SI, Loads))
return;
IRBuilderTy IRB(&SI);
Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) };
AllocaPartitioning::iterator PIs[2];
PartitionUse PUs[2];
for (unsigned i = 0, e = 2; i != e; ++i) {
PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]);
if (PIs[i] != P.end()) {
AllocaPartitioning::use_iterator UI
= P.findPartitionUseForPHIOrSelectOperand(Ops[i]);
PUs[i] = *UI;
UI->setUse(0);
}
}
Value *TV = SI.getTrueValue();
Value *FV = SI.getFalseValue();
while (!Loads.empty()) {
LoadInst *LI = Loads.pop_back_val();
IRB.SetInsertPoint(LI);
LoadInst *TL =
IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
LoadInst *FL =
IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
NumLoadsSpeculated += 2;
TL->setAlignment(LI->getAlignment());
FL->setAlignment(LI->getAlignment());
if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
TL->setMetadata(LLVMContext::MD_tbaa, Tag);
FL->setMetadata(LLVMContext::MD_tbaa, Tag);
}
Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
LI->getName() + ".sroa.speculated");
LoadInst *Loads[2] = { TL, FL };
for (unsigned i = 0, e = 2; i != e; ++i) {
if (PIs[i] != P.end()) {
Use *LoadUse = &Loads[i]->getOperandUse(0);
assert(PUs[i].getUse()->get() == LoadUse->get());
PUs[i].setUse(LoadUse);
P.use_push_back(PIs[i], PUs[i]);
}
}
DEBUG(dbgs() << " speculated to: " << *V << "\n");
LI->replaceAllUsesWith(V);
Pass.DeadInsts.insert(LI);
}
}
};
}
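/// \brief Build a GEP out of a base pointer and indices.
///
/// This will return the BasePtr if that is valid, or build a new GEP
/// instruction using the IRBuilder if GEP-ing is needed.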
static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
SmallVectorImpl<Value *> &Indices) {
if (Indices.empty())
return BasePtr;
if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
return BasePtr;
return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx");
}
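/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
/// TargetTy without changing the offset of the pointer.
///
/// This routine assumes we've already established a properly offset GEP with
/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
/// zero-indices down through type layers until we find one the same as
/// TargetTy. If we can't descend all the way to TargetTy, the indices pushed
/// along the way are discarded and the resulting GEP only encodes the offset
/// established by Indices.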
static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &TD,
Value *BasePtr, Type *Ty, Type *TargetTy,
SmallVectorImpl<Value *> &Indices) {
if (Ty == TargetTy)
return buildGEP(IRB, BasePtr, Indices);
unsigned NumLayers = 0;
Type *ElementTy = Ty;
do {
if (ElementTy->isPointerTy())
break;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
ElementTy = SeqTy->getElementType();
Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(0), 0)));
} else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
if (STy->element_begin() == STy->element_end())
break; // Nothing left to descend into.
ElementTy = *STy->element_begin();
Indices.push_back(IRB.getInt32(0));
} else {
break;
}
++NumLayers;
} while (ElementTy != TargetTy);
if (ElementTy != TargetTy)
Indices.erase(Indices.end() - NumLayers, Indices.end());
return buildGEP(IRB, BasePtr, Indices);
}
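/// \brief Recursively compute indices for a natural GEP.
///
/// This is the recursive step for getNaturalGEPWithOffset that walks down the
/// element types adding appropriate indices for the GEP.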
static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &TD,
Value *Ptr, Type *Ty, APInt &Offset,
Type *TargetTy,
SmallVectorImpl<Value *> &Indices) {
if (Offset == 0)
return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices);
if (Ty->isPointerTy())
return 0;
if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
unsigned ElementSizeInBits = TD.getTypeSizeInBits(VecTy->getScalarType());
if (ElementSizeInBits % 8)
return 0; // GEPs over non-multiple-of-8-bit vector elements are invalid.
APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(VecTy->getNumElements()))
return 0;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(),
Offset, TargetTy, Indices);
}
if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
Type *ElementTy = ArrTy->getElementType();
APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(ArrTy->getNumElements()))
return 0;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
Indices);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
return 0;
const StructLayout *SL = TD.getStructLayout(STy);
uint64_t StructOffset = Offset.getZExtValue();
if (StructOffset >= SL->getSizeInBytes())
return 0;
unsigned Index = SL->getElementContainingOffset(StructOffset);
Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
Type *ElementTy = STy->getElementType(Index);
if (Offset.uge(TD.getTypeAllocSize(ElementTy)))
return 0;
Indices.push_back(IRB.getInt32(Index));
return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
Indices);
}
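/// \brief Get a natural GEP from a base pointer to a particular offset and
/// resulting in a particular type.
///
/// The goal is to produce a "natural" looking GEP that works with the
/// existing composite types to arrive at the appropriate offset and element
/// type for a pointer. TargetTy is the element type the returned GEP should
/// point-to if possible. We recurse by decreasing Offset, adding the
/// appropriate index to Indices, and setting Ty to the result subtype.
///
/// If no natural GEP can be constructed, this function returns null.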
static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &TD,
Value *Ptr, APInt Offset, Type *TargetTy,
SmallVectorImpl<Value *> &Indices) {
PointerType *Ty = cast<PointerType>(Ptr->getType());
if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
return 0;
Type *ElementTy = Ty->getElementType();
if (!ElementTy->isSized())
return 0; // We can't GEP through an unsized element.
APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
if (ElementSize == 0)
return 0; // Zero-length arrays can't help us build a natural GEP.
APInt NumSkippedElements = Offset.sdiv(ElementSize);
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy,
Indices);
}
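/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
///
/// This tries very hard to compute a "natural" GEP which arrives at the
/// offset and produces the pointer type desired. Where it cannot, it will
/// fall back to a conservative sequence of bitcast, GEP on an i8*, and
/// bitcast to the type provided.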
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &TD,
Value *Ptr, APInt Offset, Type *PointerTy) {
SmallPtrSet<Value *, 4> Visited;
Visited.insert(Ptr);
SmallVector<Value *, 4> Indices;
Value *OffsetPtr = 0;
Value *Int8Ptr = 0;
APInt Int8PtrOffset(Offset.getBitWidth(), 0);
Type *TargetTy = PointerTy->getPointerElementType();
do {
while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
APInt GEPOffset(Offset.getBitWidth(), 0);
if (!GEP->accumulateConstantOffset(TD, GEPOffset))
break;
Offset += GEPOffset;
Ptr = GEP->getPointerOperand();
if (!Visited.insert(Ptr))
break;
}
Indices.clear();
if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy,
Indices)) {
if (P->getType() == PointerTy) {
if (OffsetPtr && OffsetPtr->use_empty())
if (Instruction *I = dyn_cast<Instruction>(OffsetPtr))
I->eraseFromParent();
return P;
}
if (!OffsetPtr) {
OffsetPtr = P;
}
}
if (Ptr->getType()->isIntegerTy(8)) {
Int8Ptr = Ptr;
Int8PtrOffset = Offset;
}
if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
Ptr = cast<Operator>(Ptr)->getOperand(0);
} else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
if (GA->mayBeOverridden())
break;
Ptr = GA->getAliasee();
} else {
break;
}
assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(Ptr));
if (!OffsetPtr) {
if (!Int8Ptr) {
Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
"raw_cast");
Int8PtrOffset = Offset;
}
OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
"raw_idx");
}
Ptr = OffsetPtr;
if (Ptr->getType() != PointerTy)
Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast");
return Ptr;
}
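/// \brief Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. It accepts identical
/// types, integer widenings, and same-sized single value types where any
/// pointer <-> integer mismatch can be bridged with a cast.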
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {
if (OldTy == NewTy)
return true;
if (IntegerType *OldITy = dyn_cast<IntegerType>(OldTy))
if (IntegerType *NewITy = dyn_cast<IntegerType>(NewTy))
if (NewITy->getBitWidth() >= OldITy->getBitWidth())
return true;
if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy))
return false;
if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
return false;
if (NewTy->isPointerTy() || OldTy->isPointerTy()) {
if (NewTy->isPointerTy() && OldTy->isPointerTy())
return true;
if (NewTy->isIntegerTy() || OldTy->isIntegerTy())
return true;
return false;
}
return true;
}
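/// \brief Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.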
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
Type *Ty) {
assert(canConvertValue(DL, V->getType(), Ty) &&
"Value not convertable to type");
if (V->getType() == Ty)
return V;
if (IntegerType *OldITy = dyn_cast<IntegerType>(V->getType()))
if (IntegerType *NewITy = dyn_cast<IntegerType>(Ty))
if (NewITy->getBitWidth() > OldITy->getBitWidth())
return IRB.CreateZExt(V, NewITy);
if (V->getType()->isIntegerTy() && Ty->isPointerTy())
return IRB.CreateIntToPtr(V, Ty);
if (V->getType()->isPointerTy() && Ty->isIntegerTy())
return IRB.CreatePtrToInt(V, Ty);
return IRB.CreateBitCast(V, Ty);
}
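/// \brief Test whether the given alloca partition can be promoted to a
/// vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// whole-vector loads and stores such that it could be promoted to a vector
/// SSA value. We only can ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.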
static bool isVectorPromotionViable(const DataLayout &TD,
Type *AllocaTy,
AllocaPartitioning &P,
uint64_t PartitionBeginOffset,
uint64_t PartitionEndOffset,
AllocaPartitioning::const_use_iterator I,
AllocaPartitioning::const_use_iterator E) {
VectorType *Ty = dyn_cast<VectorType>(AllocaTy);
if (!Ty)
return false;
uint64_t ElementSize = TD.getTypeSizeInBits(Ty->getScalarType());
if (ElementSize % 8)
return false;
assert((TD.getTypeSizeInBits(Ty) % 8) == 0 &&
"vector size not a multiple of element size?");
ElementSize /= 8;
for (; I != E; ++I) {
Use *U = I->getUse();
if (!U)
continue;
uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset;
uint64_t BeginIndex = BeginOffset / ElementSize;
if (BeginIndex * ElementSize != BeginOffset ||
BeginIndex >= Ty->getNumElements())
return false;
uint64_t EndOffset = I->EndOffset - PartitionBeginOffset;
uint64_t EndIndex = EndOffset / ElementSize;
if (EndIndex * ElementSize != EndOffset ||
EndIndex > Ty->getNumElements())
return false;
assert(EndIndex > BeginIndex && "Empty vector!");
uint64_t NumElements = EndIndex - BeginIndex;
Type *PartitionTy
= (NumElements == 1) ? Ty->getElementType()
: VectorType::get(Ty->getElementType(), NumElements);
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
if (MI->isVolatile())
return false;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U->getUser())) {
const AllocaPartitioning::MemTransferOffsets &MTO
= P.getMemTransferOffsets(*MTI);
if (!MTO.IsSplittable)
return false;
}
} else if (U->get()->getType()->getPointerElementType()->isStructTy()) {
return false;
} else if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
if (LI->isVolatile())
return false;
if (!canConvertValue(TD, PartitionTy, LI->getType()))
return false;
} else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
if (SI->isVolatile())
return false;
if (!canConvertValue(TD, SI->getValueOperand()->getType(), PartitionTy))
return false;
} else {
return false;
}
}
return true;
}
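/// \brief Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.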
static bool isIntegerWideningViable(const DataLayout &TD,
Type *AllocaTy,
uint64_t AllocBeginOffset,
AllocaPartitioning &P,
AllocaPartitioning::const_use_iterator I,
AllocaPartitioning::const_use_iterator E) {
uint64_t SizeInBits = TD.getTypeSizeInBits(AllocaTy);
if (SizeInBits > IntegerType::MAX_INT_BITS)
return false;
if (SizeInBits != TD.getTypeStoreSizeInBits(AllocaTy))
return false;
Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
if (!canConvertValue(TD, AllocaTy, IntTy) ||
!canConvertValue(TD, IntTy, AllocaTy))
return false;
uint64_t Size = TD.getTypeStoreSize(AllocaTy);
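// While examining uses, we ensure that the alloca has a covering load or
// store. We don't want to widen the integer operations only to fail to
// promote due to some other unsplittable entry (which we may make splittable
// later).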
bool WholeAllocaOp = false;
for (; I != E; ++I) {
Use *U = I->getUse();
if (!U)
continue;
uint64_t RelBegin = I->BeginOffset - AllocBeginOffset;
uint64_t RelEnd = I->EndOffset - AllocBeginOffset;
if (RelEnd > Size)
return false;
if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
if (LI->isVolatile())
return false;
if (RelBegin == 0 && RelEnd == Size)
WholeAllocaOp = true;
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
if (ITy->getBitWidth() < TD.getTypeStoreSizeInBits(ITy))
return false;
continue;
}
if (RelBegin != 0 || RelEnd != Size ||
!canConvertValue(TD, AllocaTy, LI->getType()))
return false;
} else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
Type *ValueTy = SI->getValueOperand()->getType();
if (SI->isVolatile())
return false;
if (RelBegin == 0 && RelEnd == Size)
WholeAllocaOp = true;
if (IntegerType *ITy = dyn_cast<IntegerType>(ValueTy)) {
if (ITy->getBitWidth() < TD.getTypeStoreSizeInBits(ITy))
return false;
continue;
}
if (RelBegin != 0 || RelEnd != Size ||
!canConvertValue(TD, ValueTy, AllocaTy))
return false;
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U->getUser())) {
if (MI->isVolatile() || !isa<Constant>(MI->getLength()))
return false;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U->getUser())) {
const AllocaPartitioning::MemTransferOffsets &MTO
= P.getMemTransferOffsets(*MTI);
if (!MTO.IsSplittable)
return false;
}
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U->getUser())) {
if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
II->getIntrinsicID() != Intrinsic::lifetime_end)
return false;
} else {
return false;
}
}
return WholeAllocaOp;
}
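// Extract the Ty-wide integer living Offset bytes into the wider integer V,
// computing the shift amount with endian awareness.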
static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
IntegerType *Ty, uint64_t Offset,
const Twine &Name) {
DEBUG(dbgs() << " start: " << *V << "\n");
IntegerType *IntTy = cast<IntegerType>(V->getType());
assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
"Element extends past full value");
uint64_t ShAmt = 8*Offset;
if (DL.isBigEndian())
ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
if (ShAmt) {
V = IRB.CreateLShr(V, ShAmt, Name + ".shift");
DEBUG(dbgs() << " shifted: " << *V << "\n");
}
assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
"Cannot extract to a larger integer!");
if (Ty != IntTy) {
V = IRB.CreateTrunc(V, Ty, Name + ".trunc");
DEBUG(dbgs() << " trunced: " << *V << "\n");
}
return V;
}
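// Insert the narrower integer V into Old at byte Offset, again endian-aware,
// preserving the remaining bits of Old with a mask-and-or sequence.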
static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
Value *V, uint64_t Offset, const Twine &Name) {
IntegerType *IntTy = cast<IntegerType>(Old->getType());
IntegerType *Ty = cast<IntegerType>(V->getType());
assert(Ty->getBitWidth() <= IntTy->getBitWidth() &&
"Cannot insert a larger integer!");
DEBUG(dbgs() << " start: " << *V << "\n");
if (Ty != IntTy) {
V = IRB.CreateZExt(V, IntTy, Name + ".ext");
DEBUG(dbgs() << " extended: " << *V << "\n");
}
assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) &&
"Element store outside of alloca store");
uint64_t ShAmt = 8*Offset;
if (DL.isBigEndian())
ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset);
if (ShAmt) {
V = IRB.CreateShl(V, ShAmt, Name + ".shift");
DEBUG(dbgs() << " shifted: " << *V << "\n");
}
if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) {
APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt);
Old = IRB.CreateAnd(Old, Mask, Name + ".mask");
DEBUG(dbgs() << " masked: " << *Old << "\n");
V = IRB.CreateOr(Old, V, Name + ".insert");
DEBUG(dbgs() << " inserted: " << *V << "\n");
}
return V;
}
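// Extract elements [BeginIndex, EndIndex) out of the vector V, using a single
// extractelement when one element is wanted and a shufflevector otherwise.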
static Value *extractVector(IRBuilderTy &IRB, Value *V,
unsigned BeginIndex, unsigned EndIndex,
const Twine &Name) {
VectorType *VecTy = cast<VectorType>(V->getType());
unsigned NumElements = EndIndex - BeginIndex;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
if (NumElements == VecTy->getNumElements())
return V;
if (NumElements == 1) {
V = IRB.CreateExtractElement(V, IRB.getInt32(BeginIndex),
Name + ".extract");
DEBUG(dbgs() << " extract: " << *V << "\n");
return V;
}
SmallVector<Constant*, 8> Mask;
Mask.reserve(NumElements);
for (unsigned i = BeginIndex; i != EndIndex; ++i)
Mask.push_back(IRB.getInt32(i));
V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
ConstantVector::get(Mask),
Name + ".extract");
DEBUG(dbgs() << " shuffle: " << *V << "\n");
return V;
}
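// Insert V (a scalar or narrower vector) into Old starting at BeginIndex.
// Sub-vector insertion widens V with a shuffle and then blends it with Old
// using a constant select mask.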
static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
unsigned BeginIndex, const Twine &Name) {
VectorType *VecTy = cast<VectorType>(Old->getType());
assert(VecTy && "Can only insert a vector into a vector");
VectorType *Ty = dyn_cast<VectorType>(V->getType());
if (!Ty) {
V = IRB.CreateInsertElement(Old, V, IRB.getInt32(BeginIndex),
Name + ".insert");
DEBUG(dbgs() << " insert: " << *V << "\n");
return V;
}
assert(Ty->getNumElements() <= VecTy->getNumElements() &&
"Too many elements!");
if (Ty->getNumElements() == VecTy->getNumElements()) {
assert(V->getType() == VecTy && "Vector type mismatch");
return V;
}
unsigned EndIndex = BeginIndex + Ty->getNumElements();
SmallVector<Constant*, 8> Mask;
Mask.reserve(VecTy->getNumElements());
for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
if (i >= BeginIndex && i < EndIndex)
Mask.push_back(IRB.getInt32(i - BeginIndex));
else
Mask.push_back(UndefValue::get(IRB.getInt32Ty()));
V = IRB.CreateShuffleVector(V, UndefValue::get(V->getType()),
ConstantVector::get(Mask),
Name + ".expand");
DEBUG(dbgs() << " shuffle: " << *V << "\n");
Mask.clear();
for (unsigned i = 0; i != VecTy->getNumElements(); ++i)
Mask.push_back(IRB.getInt1(i >= BeginIndex && i < EndIndex));
V = IRB.CreateSelect(ConstantVector::get(Mask), V, Old, Name + ".blend");
DEBUG(dbgs() << " blend: " << *V << "\n");
return V;
}
namespace {
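/// \brief Visitor to rewrite instructions using a partition of an alloca to
/// use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate, and to integer-based
/// accesses when it passes isIntegerWideningViable. Most of the rewriting
/// logic lives in this visitor.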
class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
bool> {
friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
const DataLayout &TD;
AllocaPartitioning &P;
SROA &Pass;
AllocaInst &OldAI, &NewAI;
const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
Type *NewAllocaTy;
VectorType *VecTy;
Type *ElementTy;
uint64_t ElementSize;
IntegerType *IntTy;
uint64_t BeginOffset, EndOffset;
bool IsSplit;
Use *OldUse;
Instruction *OldPtr;
IRBuilderTy IRB;
public:
AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P,
AllocaPartitioning::iterator PI,
SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
uint64_t NewBeginOffset, uint64_t NewEndOffset)
: TD(TD), P(P), Pass(Pass),
OldAI(OldAI), NewAI(NewAI),
NewAllocaBeginOffset(NewBeginOffset),
NewAllocaEndOffset(NewEndOffset),
NewAllocaTy(NewAI.getAllocatedType()),
VecTy(), ElementTy(), ElementSize(), IntTy(),
BeginOffset(), EndOffset(), IsSplit(), OldUse(), OldPtr(),
IRB(NewAI.getContext(), ConstantFolder()) {
}
bool visitUsers(AllocaPartitioning::const_use_iterator I,
AllocaPartitioning::const_use_iterator E) {
if (isVectorPromotionViable(TD, NewAI.getAllocatedType(), P,
NewAllocaBeginOffset, NewAllocaEndOffset,
I, E)) {
++NumVectorized;
VecTy = cast<VectorType>(NewAI.getAllocatedType());
ElementTy = VecTy->getElementType();
assert((TD.getTypeSizeInBits(VecTy->getScalarType()) % 8) == 0 &&
"Only multiple-of-8 sized vector elements are viable");
ElementSize = TD.getTypeSizeInBits(VecTy->getScalarType()) / 8;
} else if (isIntegerWideningViable(TD, NewAI.getAllocatedType(),
NewAllocaBeginOffset, P, I, E)) {
IntTy = Type::getIntNTy(NewAI.getContext(),
TD.getTypeSizeInBits(NewAI.getAllocatedType()));
}
bool CanSROA = true;
for (; I != E; ++I) {
if (!I->getUse())
continue; // Skip dead uses.
BeginOffset = I->BeginOffset;
EndOffset = I->EndOffset;
IsSplit = I->isSplit();
OldUse = I->getUse();
OldPtr = cast<Instruction>(OldUse->get());
Instruction *OldUserI = cast<Instruction>(OldUse->getUser());
IRB.SetInsertPoint(OldUserI);
IRB.SetCurrentDebugLocation(OldUserI->getDebugLoc());
IRB.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) +
".");
CanSROA &= visit(cast<Instruction>(OldUse->getUser()));
}
if (VecTy) {
assert(CanSROA);
VecTy = 0;
ElementTy = 0;
ElementSize = 0;
}
if (IntTy) {
assert(CanSROA);
IntTy = 0;
}
return CanSROA;
}
private:
bool visitInstruction(Instruction &I) {
DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n");
llvm_unreachable("No rewrite rule for this instruction!");
}
Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, Type *PointerTy) {
assert(BeginOffset >= NewAllocaBeginOffset);
APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy);
}
unsigned getOffsetAlign(uint64_t Offset) {
unsigned NewAIAlign = NewAI.getAlignment();
if (!NewAIAlign)
NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType());
return MinAlign(NewAIAlign, Offset);
}
unsigned getPartitionAlign() {
return getOffsetAlign(BeginOffset - NewAllocaBeginOffset);
}
unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
unsigned Align = getOffsetAlign(Offset);
return Align == TD.getABITypeAlignment(Ty) ? 0 : Align;
}
unsigned getPartitionTypeAlign(Type *Ty) {
return getOffsetTypeAlign(Ty, BeginOffset - NewAllocaBeginOffset);
}
unsigned getIndex(uint64_t Offset) {
assert(VecTy && "Can only call getIndex when rewriting a vector");
uint64_t RelOffset = Offset - NewAllocaBeginOffset;
assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds");
uint32_t Index = RelOffset / ElementSize;
assert(Index * ElementSize == RelOffset);
return Index;
}
void deleteIfTriviallyDead(Value *V) {
Instruction *I = cast<Instruction>(V);
if (isInstructionTriviallyDead(I))
Pass.DeadInsts.insert(I);
}
Value *rewriteVectorizedLoadInst() {
unsigned BeginIndex = getIndex(BeginOffset);
unsigned EndIndex = getIndex(EndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"load");
return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
}
Value *rewriteIntegerLoad(LoadInst &LI) {
assert(IntTy && "We cannot insert an integer to the alloca");
assert(!LI.isVolatile());
Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"load");
V = convertValue(TD, IRB, V, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
if (Offset > 0 || EndOffset < NewAllocaEndOffset)
V = extractInteger(TD, IRB, V, cast<IntegerType>(LI.getType()), Offset,
"extract");
return V;
}
bool visitLoadInst(LoadInst &LI) {
DEBUG(dbgs() << " original: " << LI << "\n");
Value *OldOp = LI.getOperand(0);
assert(OldOp == OldPtr);
uint64_t Size = EndOffset - BeginOffset;
Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8)
: LI.getType();
bool IsPtrAdjusted = false;
Value *V;
if (VecTy) {
V = rewriteVectorizedLoadInst();
} else if (IntTy && LI.getType()->isIntegerTy()) {
V = rewriteIntegerLoad(LI);
} else if (BeginOffset == NewAllocaBeginOffset &&
canConvertValue(TD, NewAllocaTy, LI.getType())) {
V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
LI.isVolatile(), "load");
} else {
Type *LTy = TargetTy->getPointerTo();
V = IRB.CreateAlignedLoad(getAdjustedAllocaPtr(IRB, LTy),
getPartitionTypeAlign(TargetTy),
LI.isVolatile(), "load");
IsPtrAdjusted = true;
}
V = convertValue(TD, IRB, V, TargetTy);
if (IsSplit) {
assert(!LI.isVolatile());
assert(LI.getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
assert(Size < TD.getTypeStoreSize(LI.getType()) &&
"Split load isn't smaller than original load");
assert(LI.getType()->getIntegerBitWidth() ==
TD.getTypeStoreSizeInBits(LI.getType()) &&
"Non-byte-multiple bit width");
IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
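// Create a placeholder value with the same type as LI to use as the basis
// for the new value. This allows us to replace the uses of LI with the
// computed value, and then replace the placeholder with LI, leaving LI only
// used for this computation.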
Value *Placeholder
= new LoadInst(UndefValue::get(LI.getType()->getPointerTo()));
V = insertInteger(TD, IRB, Placeholder, V, BeginOffset,
"insert");
LI.replaceAllUsesWith(V);
Placeholder->replaceAllUsesWith(&LI);
delete Placeholder;
} else {
LI.replaceAllUsesWith(V);
}
Pass.DeadInsts.insert(&LI);
deleteIfTriviallyDead(OldOp);
DEBUG(dbgs() << " to: " << *V << "\n");
return !LI.isVolatile() && !IsPtrAdjusted;
}
bool rewriteVectorizedStoreInst(Value *V,
StoreInst &SI, Value *OldOp) {
unsigned BeginIndex = getIndex(BeginOffset);
unsigned EndIndex = getIndex(EndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
unsigned NumElements = EndIndex - BeginIndex;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
Type *PartitionTy
= (NumElements == 1) ? ElementTy
: VectorType::get(ElementTy, NumElements);
if (V->getType() != PartitionTy)
V = convertValue(TD, IRB, V, PartitionTy);
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"load");
V = insertVector(IRB, Old, V, BeginIndex, "vec");
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Pass.DeadInsts.insert(&SI);
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
return true;
}
bool rewriteIntegerStore(Value *V, StoreInst &SI) {
assert(IntTy && "We cannot extract an integer from the alloca");
assert(!SI.isVolatile());
if (TD.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"oldload");
Old = convertValue(TD, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
V = insertInteger(TD, IRB, Old, SI.getValueOperand(), Offset,
"insert");
}
V = convertValue(TD, IRB, V, NewAllocaTy);
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Pass.DeadInsts.insert(&SI);
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
return true;
}
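// Rewrite a store against the old pointer: narrow the stored value first if
// this is a split store, then dispatch to the vector or integer forms when
// available, and otherwise emit a (possibly pointer-adjusted) plain store.
// As with loads, the return value reports whether the new alloca is still
// accessed directly and non-volatilely, i.e. still promotable.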
bool visitStoreInst(StoreInst &SI) {
DEBUG(dbgs() << " original: " << SI << "\n");
Value *OldOp = SI.getOperand(1);
assert(OldOp == OldPtr);
Value *V = SI.getValueOperand();
if (V->getType()->isPointerTy())
if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
Pass.PostPromotionWorklist.insert(AI);
uint64_t Size = EndOffset - BeginOffset;
if (Size < TD.getTypeStoreSize(V->getType())) {
assert(!SI.isVolatile());
assert(IsSplit && "A seemingly split store isn't splittable");
assert(V->getType()->isIntegerTy() &&
"Only integer type loads and stores are split");
assert(V->getType()->getIntegerBitWidth() ==
TD.getTypeStoreSizeInBits(V->getType()) &&
"Non-byte-multiple bit width");
IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
V = extractInteger(TD, IRB, V, NarrowTy, BeginOffset,
"extract");
}
if (VecTy)
return rewriteVectorizedStoreInst(V, SI, OldOp);
if (IntTy && V->getType()->isIntegerTy())
return rewriteIntegerStore(V, SI);
StoreInst *NewSI;
if (BeginOffset == NewAllocaBeginOffset &&
EndOffset == NewAllocaEndOffset &&
canConvertValue(TD, V->getType(), NewAllocaTy)) {
V = convertValue(TD, IRB, V, NewAllocaTy);
NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
SI.isVolatile());
} else {
Value *NewPtr = getAdjustedAllocaPtr(IRB, V->getType()->getPointerTo());
NewSI = IRB.CreateAlignedStore(V, NewPtr,
getPartitionTypeAlign(V->getType()),
SI.isVolatile());
}
(void)NewSI;
Pass.DeadInsts.insert(&SI);
deleteIfTriviallyDead(OldOp);
DEBUG(dbgs() << " to: " << *NewSI << "\n");
return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile();
}
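// Splat an i8 across Size bytes as a single integer. The multiplier is the
// constant 0x0101...01, computed as (2^(Size*8) - 1) / 0xFF; for example a
// byte 0xAB splatted to Size == 4 yields 0xABABABAB.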
Value *getIntegerSplat(Value *V, unsigned Size) {
assert(Size > 0 && "Expected a positive number of bytes.");
IntegerType *VTy = cast<IntegerType>(V->getType());
assert(VTy->getBitWidth() == 8 && "Expected an i8 value for the byte");
if (Size == 1)
return V;
Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8);
V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, "zext"),
ConstantExpr::getUDiv(
Constant::getAllOnesValue(SplatIntTy),
ConstantExpr::getZExt(
Constant::getAllOnesValue(V->getType()),
SplatIntTy)),
"isplat");
return V;
}
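// Splat a scalar value across NumElements vector lanes.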
Value *getVectorSplat(Value *V, unsigned NumElements) {
V = IRB.CreateVectorSplat(NumElements, V, "vsplat");
DEBUG(dbgs() << " splat: " << *V << "\n");
return V;
}
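// Rewrite a memset over this partition. Variable-length memsets only get
// their destination pointer and alignment rebased onto the new alloca.
// Constant-length memsets covering a suitably simple alloca are lowered to a
// real store of a splatted value (through the vector or integer form when
// one exists); everything else is re-emitted as a narrower memset over just
// this partition's bytes.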
bool visitMemSetInst(MemSetInst &II) {
DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getRawDest() == OldPtr);
if (!isa<Constant>(II.getLength())) {
II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
Type *CstTy = II.getAlignmentCst()->getType();
II.setAlignment(ConstantInt::get(CstTy, getPartitionAlign()));
deleteIfTriviallyDead(OldPtr);
return false;
}
Pass.DeadInsts.insert(&II);
Type *AllocaTy = NewAI.getAllocatedType();
Type *ScalarTy = AllocaTy->getScalarType();
if (!VecTy && !IntTy &&
(BeginOffset != NewAllocaBeginOffset ||
EndOffset != NewAllocaEndOffset ||
!AllocaTy->isSingleValueType() ||
!TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)) ||
TD.getTypeSizeInBits(ScalarTy)%8 != 0)) {
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
CallInst *New
= IRB.CreateMemSet(getAdjustedAllocaPtr(IRB,
II.getRawDest()->getType()),
II.getValue(), Size, getPartitionAlign(),
II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
Value *V;
if (VecTy) {
assert(ElementTy == ScalarTy);
unsigned BeginIndex = getIndex(BeginOffset);
unsigned EndIndex = getIndex(EndOffset);
assert(EndIndex > BeginIndex && "Empty vector!");
unsigned NumElements = EndIndex - BeginIndex;
assert(NumElements <= VecTy->getNumElements() && "Too many elements!");
Value *Splat =
getIntegerSplat(II.getValue(), TD.getTypeSizeInBits(ElementTy) / 8);
Splat = convertValue(TD, IRB, Splat, ElementTy);
if (NumElements > 1)
Splat = getVectorSplat(Splat, NumElements);
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"oldload");
V = insertVector(IRB, Old, Splat, BeginIndex, "vec");
} else if (IntTy) {
assert(!II.isVolatile());
uint64_t Size = EndOffset - BeginOffset;
V = getIntegerSplat(II.getValue(), Size);
if (IntTy && (BeginOffset != NewAllocaBeginOffset ||
EndOffset != NewAllocaEndOffset)) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"oldload");
Old = convertValue(TD, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
V = insertInteger(TD, IRB, Old, V, Offset, "insert");
} else {
assert(V->getType() == IntTy &&
"Wrong type for an alloca wide integer!");
}
V = convertValue(TD, IRB, V, AllocaTy);
} else {
assert(BeginOffset == NewAllocaBeginOffset);
assert(EndOffset == NewAllocaEndOffset);
V = getIntegerSplat(II.getValue(), TD.getTypeSizeInBits(ScalarTy) / 8);
if (VectorType *AllocaVecTy = dyn_cast<VectorType>(AllocaTy))
V = getVectorSplat(V, AllocaVecTy->getNumElements());
V = convertValue(TD, IRB, V, AllocaTy);
}
Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return !II.isVolatile();
}
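// Rewrite a memcpy/memmove with this partition as source and/or destination.
// Unsplittable transfers merely have their pointer and alignment rebased.
// Splittable ones are either re-emitted as a narrower memcpy or, when the
// new alloca has a vector or integer form, lowered to an explicit load/store
// pair with the matching element or sub-integer extraction and insertion.
// Note the early exit when both ends land in the same alloca: only the
// length is updated and the intrinsic is left in place.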
bool visitMemTransferInst(MemTransferInst &II) {
DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr);
bool IsDest = II.getRawDest() == OldPtr;
const AllocaPartitioning::MemTransferOffsets &MTO
= P.getMemTransferOffsets(II);
unsigned IntPtrWidth = TD.getPointerSizeInBits();
APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
: MTO.SourceBegin));
unsigned Align = II.getAlignment();
if (Align > 1)
Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
MinAlign(II.getAlignment(), getPartitionAlign()));
if (!MTO.IsSplittable) {
Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
if (IsDest)
II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
else
II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType()));
Type *CstTy = II.getAlignmentCst()->getType();
II.setAlignment(ConstantInt::get(CstTy, Align));
DEBUG(dbgs() << " to: " << II << "\n");
deleteIfTriviallyDead(OldOp);
return false;
}
bool EmitMemCpy
= !VecTy && !IntTy && (BeginOffset != NewAllocaBeginOffset ||
EndOffset != NewAllocaEndOffset ||
!NewAI.getAllocatedType()->isSingleValueType());
if (EmitMemCpy && &OldAI == &NewAI) {
uint64_t OrigBegin = IsDest ? MTO.DestBegin : MTO.SourceBegin;
uint64_t OrigEnd = IsDest ? MTO.DestEnd : MTO.SourceEnd;
assert(BeginOffset == OrigBegin);
(void)OrigBegin;
if (EndOffset != OrigEnd)
II.setLength(ConstantInt::get(II.getLength()->getType(),
EndOffset - BeginOffset));
return false;
}
Pass.DeadInsts.insert(&II);
Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest();
if (AllocaInst *AI
= dyn_cast<AllocaInst>(OtherPtr->stripInBoundsOffsets()))
Pass.Worklist.insert(AI);
if (EmitMemCpy) {
Type *OtherPtrTy = IsDest ? II.getRawSource()->getType()
: II.getRawDest()->getType();
OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy);
Value *OurPtr
= getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType()
: II.getRawSource()->getType());
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
IsDest ? OtherPtr : OurPtr,
Size, Align, II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
if (!Align)
Align = 1;
bool IsWholeAlloca = BeginOffset == NewAllocaBeginOffset &&
EndOffset == NewAllocaEndOffset;
uint64_t Size = EndOffset - BeginOffset;
unsigned BeginIndex = VecTy ? getIndex(BeginOffset) : 0;
unsigned EndIndex = VecTy ? getIndex(EndOffset) : 0;
unsigned NumElements = EndIndex - BeginIndex;
IntegerType *SubIntTy
= IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
Type *OtherPtrTy = NewAI.getType();
if (VecTy && !IsWholeAlloca) {
if (NumElements == 1)
OtherPtrTy = VecTy->getElementType();
else
OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
OtherPtrTy = OtherPtrTy->getPointerTo();
} else if (IntTy && !IsWholeAlloca) {
OtherPtrTy = SubIntTy->getPointerTo();
}
Value *SrcPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy);
Value *DstPtr = &NewAI;
if (!IsDest)
std::swap(SrcPtr, DstPtr);
Value *Src;
if (VecTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"load");
Src = extractVector(IRB, Src, BeginIndex, EndIndex, "vec");
} else if (IntTy && !IsWholeAlloca && !IsDest) {
Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"load");
Src = convertValue(TD, IRB, Src, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
Src = extractInteger(TD, IRB, Src, SubIntTy, Offset, "extract");
} else {
Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
"copyload");
}
if (VecTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"oldload");
Src = insertVector(IRB, Old, Src, BeginIndex, "vec");
} else if (IntTy && !IsWholeAlloca && IsDest) {
Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
"oldload");
Old = convertValue(TD, IRB, Old, IntTy);
assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset");
uint64_t Offset = BeginOffset - NewAllocaBeginOffset;
Src = insertInteger(TD, IRB, Old, Src, Offset, "insert");
Src = convertValue(TD, IRB, Src, NewAllocaTy);
}
StoreInst *Store = cast<StoreInst>(
IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
return !II.isVolatile();
}
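// Only lifetime_start/lifetime_end reach this visitor (per the assert);
// shrink the marker so it covers just this partition's bytes on the new
// alloca.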
bool visitIntrinsicInst(IntrinsicInst &II) {
assert(II.getIntrinsicID() == Intrinsic::lifetime_start ||
II.getIntrinsicID() == Intrinsic::lifetime_end);
DEBUG(dbgs() << " original: " << II << "\n");
assert(II.getArgOperand(1) == OldPtr);
Pass.DeadInsts.insert(&II);
ConstantInt *Size
= ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
EndOffset - BeginOffset);
Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType());
Value *New;
if (II.getIntrinsicID() == Intrinsic::lifetime_start)
New = IRB.CreateLifetimeStart(Ptr, Size);
else
New = IRB.CreateLifetimeEnd(Ptr, Size);
DEBUG(dbgs() << " to: " << *New << "\n");
return true;
}
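// PHIs that reach the rewriter were already vetted and speculated where
// needed; here the old pointer operand is simply replaced with an adjusted
// pointer into the new alloca. The adjusted pointer is built where the old
// pointer was defined (via PtrBuilder) rather than at the current insertion
// point, presumably because non-PHI instructions cannot be inserted at the
// PHI's own position.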
bool visitPHINode(PHINode &PN) {
DEBUG(dbgs() << " original: " << PN << "\n");
IRBuilderTy PtrBuilder(cast<Instruction>(OldPtr));
PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) +
".");
Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType());
std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
DEBUG(dbgs() << " to: " << PN << "\n");
deleteIfTriviallyDead(OldPtr);
return false;
}
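// Replace whichever select operand(s) referenced the old pointer with an
// adjusted pointer into the new alloca.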
bool visitSelectInst(SelectInst &SI) {
DEBUG(dbgs() << " original: " << SI << "\n");
assert((SI.getTrueValue() == OldPtr || SI.getFalseValue() == OldPtr) &&
"Pointer isn't an operand!");
Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType());
if (SI.getOperand(1) == OldPtr)
SI.setOperand(1, NewPtr);
if (SI.getOperand(2) == OldPtr)
SI.setOperand(2, NewPtr);
DEBUG(dbgs() << " to: " << SI << "\n");
deleteIfTriviallyDead(OldPtr);
return false;
}
};
}
namespace {
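// Rewrites loads and stores of first-class aggregate (FCA) types into
// per-element scalar operations by walking an alloca's users through
// bitcasts, GEPs, PHIs, and selects, so that the partitioning logic only
// ever sees single-value-type memory accesses.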
class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
const DataLayout &TD;
SmallVector<Use *, 8> Queue;
SmallPtrSet<User *, 8> Visited;
Use *U;
public:
AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {}
bool rewrite(Instruction &I) {
DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
enqueueUsers(I);
bool Changed = false;
while (!Queue.empty()) {
U = Queue.pop_back_val();
Changed |= visit(cast<Instruction>(U->getUser()));
}
return Changed;
}
private:
void enqueueUsers(Instruction &I) {
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
++UI)
if (Visited.insert(*UI))
Queue.push_back(&UI.getUse());
}
bool visitInstruction(Instruction &I) { return false; }
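// CRTP helper that recursively walks an aggregate type, maintaining the
// extractvalue/insertvalue index path and the matching GEP index list in
// parallel, and invokes the derived class's emitFunc on every leaf of
// single-value type.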
template <typename Derived>
class OpSplitter {
protected:
IRBuilderTy IRB;
SmallVector<unsigned, 4> Indices;
SmallVector<Value *, 4> GEPIndices;
Value *Ptr;
OpSplitter(Instruction *InsertionPoint, Value *Ptr)
: IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
public:
void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
if (Ty->isSingleValueType())
return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
unsigned OldSize = Indices.size();
(void)OldSize;
for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
++Idx) {
assert(Indices.size() == OldSize && "Did not return to the old size");
Indices.push_back(Idx);
GEPIndices.push_back(IRB.getInt32(Idx));
emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
GEPIndices.pop_back();
Indices.pop_back();
}
return;
}
if (StructType *STy = dyn_cast<StructType>(Ty)) {
unsigned OldSize = Indices.size();
(void)OldSize;
for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
++Idx) {
assert(Indices.size() == OldSize && "Did not return to the old size");
Indices.push_back(Idx);
GEPIndices.push_back(IRB.getInt32(Idx));
emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
GEPIndices.pop_back();
Indices.pop_back();
}
return;
}
llvm_unreachable("Only arrays and structs are aggregate loadable types");
}
};
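// Emits a GEP plus scalar load per leaf and accumulates the pieces into an
// aggregate value with insertvalue.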
struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
: OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
assert(Ty->isSingleValueType());
Value *GEP = IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep");
Value *Load = IRB.CreateLoad(GEP, Name + ".load");
Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
DEBUG(dbgs() << " to: " << *Load << "\n");
}
};
bool visitLoadInst(LoadInst &LI) {
assert(LI.getPointerOperand() == *U);
if (!LI.isSimple() || LI.getType()->isSingleValueType())
return false;
DEBUG(dbgs() << " original: " << LI << "\n");
LoadOpSplitter Splitter(&LI, *U);
Value *V = UndefValue::get(LI.getType());
Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
LI.replaceAllUsesWith(V);
LI.eraseFromParent();
return true;
}
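// Emits an extractvalue plus GEP plus scalar store per leaf of the stored
// aggregate.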
struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
: OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
assert(Ty->isSingleValueType());
Value *Store = IRB.CreateStore(
IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
}
};
bool visitStoreInst(StoreInst &SI) {
if (!SI.isSimple() || SI.getPointerOperand() != *U)
return false;
Value *V = SI.getValueOperand();
if (V->getType()->isSingleValueType())
return false;
DEBUG(dbgs() << " original: " << SI << "\n");
StoreOpSplitter Splitter(&SI, *U);
Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
SI.eraseFromParent();
return true;
}
bool visitBitCastInst(BitCastInst &BC) {
enqueueUsers(BC);
return false;
}
bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
enqueueUsers(GEPI);
return false;
}
bool visitPHINode(PHINode &PN) {
enqueueUsers(PN);
return false;
}
bool visitSelectInst(SelectInst &SI) {
enqueueUsers(SI);
return false;
}
};
}
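// Strip aggregate wrappers (arrays or structs whose element at offset zero
// covers the whole allocation) and return the innermost type that still
// spans the full size; bails back to the outer type as soon as the inner
// element is smaller.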
static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {
if (Ty->isSingleValueType())
return Ty;
uint64_t AllocSize = DL.getTypeAllocSize(Ty);
uint64_t TypeSize = DL.getTypeSizeInBits(Ty);
Type *InnerTy;
if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
InnerTy = ArrTy->getElementType();
} else if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Index = SL->getElementContainingOffset(0);
InnerTy = STy->getElementType(Index);
} else {
return Ty;
}
if (AllocSize > DL.getTypeAllocSize(InnerTy) ||
TypeSize > DL.getTypeSizeInBits(InnerTy))
return Ty;
return stripAggregateTypeWrapping(DL, InnerTy);
}
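// Try to find a type that exactly covers bytes [Offset, Offset + Size) of
// Ty: the (unwrapped) whole type, a single element, a sub-array of whole
// elements, or a contiguous run of struct fields repackaged as a new struct
// with matching layout. Returns null when no such type can be carved out.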
static Type *getTypePartition(const DataLayout &TD, Type *Ty,
uint64_t Offset, uint64_t Size) {
if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
return stripAggregateTypeWrapping(TD, Ty);
if (Offset > TD.getTypeAllocSize(Ty) ||
(TD.getTypeAllocSize(Ty) - Offset) < Size)
return 0;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
if (SeqTy->isPointerTy())
return 0;
Type *ElementTy = SeqTy->getElementType();
uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
uint64_t NumSkippedElements = Offset / ElementSize;
if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy))
if (NumSkippedElements >= ArrTy->getNumElements())
return 0;
if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy))
if (NumSkippedElements >= VecTy->getNumElements())
return 0;
Offset -= NumSkippedElements * ElementSize;
if (Offset > 0 || Size < ElementSize) {
if ((Offset + Size) > ElementSize)
return 0;
return getTypePartition(TD, ElementTy, Offset, Size);
}
assert(Offset == 0);
if (Size == ElementSize)
return stripAggregateTypeWrapping(TD, ElementTy);
assert(Size > ElementSize);
uint64_t NumElements = Size / ElementSize;
if (NumElements * ElementSize != Size)
return 0;
return ArrayType::get(ElementTy, NumElements);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
return 0;
const StructLayout *SL = TD.getStructLayout(STy);
if (Offset >= SL->getSizeInBytes())
return 0;
uint64_t EndOffset = Offset + Size;
if (EndOffset > SL->getSizeInBytes())
return 0;
unsigned Index = SL->getElementContainingOffset(Offset);
Offset -= SL->getElementOffset(Index);
Type *ElementTy = STy->getElementType(Index);
uint64_t ElementSize = TD.getTypeAllocSize(ElementTy);
if (Offset >= ElementSize)
return 0;
if (Offset > 0 || Size < ElementSize) {
if ((Offset + Size) > ElementSize)
return 0;
return getTypePartition(TD, ElementTy, Offset, Size);
}
assert(Offset == 0);
if (Size == ElementSize)
return stripAggregateTypeWrapping(TD, ElementTy);
StructType::element_iterator EI = STy->element_begin() + Index,
EE = STy->element_end();
if (EndOffset < SL->getSizeInBytes()) {
unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
if (Index == EndIndex)
return 0;
if (SL->getElementOffset(EndIndex) != EndOffset)
return 0;
assert(Index < EndIndex);
EE = STy->element_begin() + EndIndex;
}
StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE),
STy->isPacked());
const StructLayout *SubSL = TD.getStructLayout(SubTy);
if (Size != SubSL->getSizeInBytes())
return 0;
return SubTy;
}
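// Rewrite all of the uses of one partition of an alloca. After speculating
// the partition's PHIs and selects, this picks the best type for a
// replacement alloca (the partition's common use type, a sub-type carved out
// of the original allocation, a legal integer, or a plain i8 array as a last
// resort), allocates it when it differs from the original, and rewrites
// every use. Promotable results are queued for mem2reg; a distinct,
// non-promotable new alloca goes back onto the worklist and its speculative
// post-promotion entries are rolled back.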
bool SROA::rewriteAllocaPartition(AllocaInst &AI,
AllocaPartitioning &P,
AllocaPartitioning::iterator PI) {
uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset;
bool IsLive = false;
for (AllocaPartitioning::use_iterator UI = P.use_begin(PI),
UE = P.use_end(PI);
UI != UE && !IsLive; ++UI)
if (UI->getUse())
IsLive = true;
if (!IsLive)
return false;
DEBUG(dbgs() << "Speculating PHIs and selects in partition "
<< "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n");
PHIOrSelectSpeculator Speculator(*TD, P, *this);
DEBUG(dbgs() << " speculating ");
DEBUG(P.print(dbgs(), PI, ""));
Speculator.visitUsers(PI);
Type *AllocaTy = 0;
if (Type *PartitionTy = P.getCommonType(PI))
if (TD->getTypeAllocSize(PartitionTy) >= AllocaSize)
AllocaTy = PartitionTy;
if (!AllocaTy)
if (Type *PartitionTy = getTypePartition(*TD, AI.getAllocatedType(),
PI->BeginOffset, AllocaSize))
AllocaTy = PartitionTy;
if ((!AllocaTy ||
(AllocaTy->isArrayTy() &&
AllocaTy->getArrayElementType()->isIntegerTy())) &&
TD->isLegalInteger(AllocaSize * 8))
AllocaTy = Type::getIntNTy(*C, AllocaSize * 8);
if (!AllocaTy)
AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize);
assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize);
AllocaInst *NewAI;
if (AllocaTy == AI.getAllocatedType()) {
assert(PI->BeginOffset == 0 &&
"Non-zero begin offset but same alloca type");
assert(PI == P.begin() && "Begin offset is zero on later partition");
NewAI = &AI;
} else {
unsigned Alignment = AI.getAlignment();
if (!Alignment) {
Alignment = TD->getABITypeAlignment(AI.getAllocatedType());
}
Alignment = MinAlign(Alignment, PI->BeginOffset);
if (Alignment <= TD->getABITypeAlignment(AllocaTy))
Alignment = 0;
NewAI = new AllocaInst(AllocaTy, 0, Alignment,
AI.getName() + ".sroa." + Twine(PI - P.begin()),
&AI);
++NumNewAllocas;
}
DEBUG(dbgs() << "Rewriting alloca partition "
<< "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: "
<< *NewAI << "\n");
unsigned PPWOldSize = PostPromotionWorklist.size();
AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI,
PI->BeginOffset, PI->EndOffset);
DEBUG(dbgs() << " rewriting ");
DEBUG(P.print(dbgs(), PI, ""));
bool Promotable = Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI));
if (Promotable) {
DEBUG(dbgs() << " and queuing for promotion\n");
PromotableAllocas.push_back(NewAI);
} else if (NewAI != &AI) {
Worklist.insert(NewAI);
}
if (!Promotable)
while (PostPromotionWorklist.size() > PPWOldSize)
PostPromotionWorklist.pop_back();
return true;
}
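// Rewrite each partition of the alloca in turn, accumulating whether any
// change was made.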
bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) {
bool Changed = false;
for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end(); PI != PE;
++PI)
Changed |= rewriteAllocaPartition(AI, P, PI);
return Changed;
}
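// Analyze and transform a single alloca: trivially dead allocas are removed
// outright; unsized, zero-sized, and array allocas are skipped. Otherwise
// FCA loads and stores are pre-split, the uses are partitioned, dead users
// and operands are queued for deletion, and any live partitions are
// rewritten.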
bool SROA::runOnAlloca(AllocaInst &AI) {
DEBUG(dbgs() << "SROA alloca: " << AI << "\n");
++NumAllocasAnalyzed;
if (AI.use_empty()) {
AI.eraseFromParent();
return true;
}
if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() ||
TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
return false;
bool Changed = false;
AggLoadStoreRewriter AggRewriter(*TD);
Changed |= AggRewriter.rewrite(AI);
AllocaPartitioning P(*TD, AI);
DEBUG(P.print(dbgs()));
if (P.isEscaped())
return Changed;
for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(),
DE = P.dead_user_end();
DI != DE; ++DI) {
Changed = true;
(*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
DeadInsts.insert(*DI);
}
for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(),
DE = P.dead_op_end();
DO != DE; ++DO) {
Value *OldV = **DO;
**DO = UndefValue::get(OldV->getType());
if (Instruction *OldI = dyn_cast<Instruction>(OldV))
if (isInstructionTriviallyDead(OldI)) {
Changed = true;
DeadInsts.insert(OldI);
}
}
if (P.begin() == P.end())
return Changed;
return splitAlloca(AI, P) || Changed;
}
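// Delete the queued dead instructions, recursively queueing any operands
// that become trivially dead, and record deleted allocas so the caller can
// prune its worklists.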
void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
while (!DeadInsts.empty()) {
Instruction *I = DeadInsts.pop_back_val();
DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
I->replaceAllUsesWith(UndefValue::get(I->getType()));
for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
if (Instruction *U = dyn_cast<Instruction>(*OI)) {
*OI = 0;
if (isInstructionTriviallyDead(U))
DeadInsts.insert(U);
}
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
DeletedAllocas.insert(AI);
++NumDeleted;
I->eraseFromParent();
}
}
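// Promote the accumulated allocas to SSA values, preferring mem2reg when a
// dominator tree is available and falling back to the SSAUpdater-driven
// AllocaPromoter otherwise. The SSAUpdater path first strips lifetime
// markers (and the bitcasts/GEPs used only by them) before handing the
// remaining uses to AllocaPromoter.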
bool SROA::promoteAllocas(Function &F) {
if (PromotableAllocas.empty())
return false;
NumPromoted += PromotableAllocas.size();
if (DT && !ForceSSAUpdater) {
DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
PromoteMemToReg(PromotableAllocas, *DT);
PromotableAllocas.clear();
return true;
}
DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
SSAUpdater SSA;
DIBuilder DIB(*F.getParent());
SmallVector<Instruction*, 64> Insts;
for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
AllocaInst *AI = PromotableAllocas[Idx];
for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
UI != UE;) {
Instruction *I = cast<Instruction>(*UI++);
if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
assert(onlyUsedByLifetimeMarkers(I) &&
"Found a bitcast used outside of a lifetime marker.");
while (!I->use_empty())
cast<Instruction>(*I->use_begin())->eraseFromParent();
I->eraseFromParent();
continue;
}
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end);
II->eraseFromParent();
continue;
}
Insts.push_back(I);
}
AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
Insts.clear();
}
PromotableAllocas.clear();
return true;
}
namespace {
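// remove_if predicate: true when the alloca is in the deleted set.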
class IsAllocaInSet {
typedef SmallPtrSet<AllocaInst *, 4> SetType;
const SetType &Set;
public:
typedef AllocaInst *argument_type;
IsAllocaInSet(const SetType &Set) : Set(Set) {}
bool operator()(AllocaInst *AI) const { return Set.count(AI); }
};
}
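// Pass entry point: bail out when no DataLayout is available, seed the
// worklist with the entry block's allocas, then alternate between rewriting
// and promotion until the post-promotion worklist stops producing new work,
// pruning any references to allocas that were deleted along the way.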
bool SROA::runOnFunction(Function &F) {
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
TD = getAnalysisIfAvailable<DataLayout>();
if (!TD) {
DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
return false;
}
DT = getAnalysisIfAvailable<DominatorTree>();
BasicBlock &EntryBB = F.getEntryBlock();
for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
I != E; ++I)
if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
Worklist.insert(AI);
bool Changed = false;
SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
do {
while (!Worklist.empty()) {
Changed |= runOnAlloca(*Worklist.pop_back_val());
deleteDeadInstructions(DeletedAllocas);
if (!DeletedAllocas.empty()) {
Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
PromotableAllocas.end(),
IsAllocaInSet(DeletedAllocas)),
PromotableAllocas.end());
DeletedAllocas.clear();
}
}
Changed |= promoteAllocas(F);
Worklist = PostPromotionWorklist;
PostPromotionWorklist.clear();
} while (!Worklist.empty());
return Changed;
}
void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
if (RequiresDomTree)
AU.addRequired<DominatorTree>();
AU.setPreservesCFG();
}