//===-- ARM64LoadStoreOptimizer.cpp - ARM64 load/store optimization ------===//
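//
// This pass performs two optimizations on ARM64 memory operations: it merges
// loads/stores of adjacent offsets into a single paired (LDP/STP)
// instruction, and it folds base-register add/sub updates into pre- or
// post-indexed (writeback) addressing forms.
//
//===----------------------------------------------------------------------===//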
#define DEBUG_TYPE "arm64-ldst-opt"
#include "ARM64InstrInfo.h"
#include "MCTargetDesc/ARM64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
"Number of load/store from unscaled generated");
static cl::opt<bool>
DoLoadStoreOpt("arm64-load-store-opt", cl::init(true), cl::Hidden);
static cl::opt<unsigned>
ScanLimit("arm64-load-store-scan-limit", cl::init(20), cl::Hidden);
static cl::opt<bool>
EnableARM64UnscaledMemOp("arm64-unscaled-mem-op", cl::Hidden,
cl::desc("Allow ARM64 unscaled load/store combining"),
cl::init(true));
namespace {
struct ARM64LoadStoreOpt : public MachineFunctionPass {
static char ID;
ARM64LoadStoreOpt() : MachineFunctionPass(ID) {}
const ARM64InstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
bool &mergeForward,
unsigned Limit);
MachineBasicBlock::iterator mergePairedInsns(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
bool mergeForward);
MachineBasicBlock::iterator
findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
unsigned Limit, int Value);
MachineBasicBlock::iterator
findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I,
unsigned Limit);
MachineBasicBlock::iterator
mergePreIdxUpdateInsn(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Update);
MachineBasicBlock::iterator
mergePostIdxUpdateInsn(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Update);
bool optimizeBlock(MachineBasicBlock &MBB);
virtual bool runOnMachineFunction(MachineFunction &Fn);
virtual const char *getPassName() const {
return "ARM64 load / store optimization pass";
}
private:
int getMemSize(MachineInstr *MemMI);
};
char ARM64LoadStoreOpt::ID = 0;
}
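// Return true if the opcode is one of the unscaled (LDUR/STUR) load/store
// variants handled by this pass.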
static bool isUnscaledLdst(unsigned Opc) {
switch (Opc) {
default:
return false;
case ARM64::STURSi: return true;
case ARM64::STURDi: return true;
case ARM64::STURQi: return true;
case ARM64::STURWi: return true;
case ARM64::STURXi: return true;
case ARM64::LDURSi: return true;
case ARM64::LDURDi: return true;
case ARM64::LDURQi: return true;
case ARM64::LDURWi: return true;
case ARM64::LDURXi: return true;
}
}
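// Return the size in bytes of the memory access performed by the load/store.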
int ARM64LoadStoreOpt::getMemSize(MachineInstr *MemMI) {
switch (MemMI->getOpcode()) {
default:
llvm_unreachable("Opcode has has unknown size!");
case ARM64::STRSui: case ARM64::STURSi: return 4;
case ARM64::STRDui: case ARM64::STURDi: return 8;
case ARM64::STRQui: case ARM64::STURQi: return 16;
case ARM64::STRWui: case ARM64::STURWi: return 4;
case ARM64::STRXui: case ARM64::STURXi: return 8;
case ARM64::LDRSui: case ARM64::LDURSi: return 4;
case ARM64::LDRDui: case ARM64::LDURDi: return 8;
case ARM64::LDRQui: case ARM64::LDURQi: return 16;
case ARM64::LDRWui: case ARM64::LDURWi: return 4;
case ARM64::LDRXui: case ARM64::LDURXi: return 8;
}
}
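// Map a single load/store opcode to the corresponding paired (LDP/STP)
// opcode.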
static unsigned getMatchingPairOpcode(unsigned Opc) {
switch (Opc) {
default:
llvm_unreachable("Opcode has no pairwise equivalent!");
case ARM64::STRSui: case ARM64::STURSi: return ARM64::STPSi;
case ARM64::STRDui: case ARM64::STURDi: return ARM64::STPDi;
case ARM64::STRQui: case ARM64::STURQi: return ARM64::STPQi;
case ARM64::STRWui: case ARM64::STURWi: return ARM64::STPWi;
case ARM64::STRXui: case ARM64::STURXi: return ARM64::STPXi;
case ARM64::LDRSui: case ARM64::LDURSi: return ARM64::LDPSi;
case ARM64::LDRDui: case ARM64::LDURDi: return ARM64::LDPDi;
case ARM64::LDRQui: case ARM64::LDURQi: return ARM64::LDPQi;
case ARM64::LDRWui: case ARM64::LDURWi: return ARM64::LDPWi;
case ARM64::LDRXui: case ARM64::LDURXi: return ARM64::LDPXi;
}
}
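// Map an unindexed load/store opcode to its pre-indexed (writeback) form.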
static unsigned getPreIndexedOpcode(unsigned Opc) {
switch (Opc) {
default:
llvm_unreachable("Opcode has no pre-indexed equivalent!");
case ARM64::STRSui: return ARM64::STRSpre;
case ARM64::STRDui: return ARM64::STRDpre;
case ARM64::STRQui: return ARM64::STRQpre;
case ARM64::STRWui: return ARM64::STRWpre;
case ARM64::STRXui: return ARM64::STRXpre;
case ARM64::LDRSui: return ARM64::LDRSpre;
case ARM64::LDRDui: return ARM64::LDRDpre;
case ARM64::LDRQui: return ARM64::LDRQpre;
case ARM64::LDRWui: return ARM64::LDRWpre;
case ARM64::LDRXui: return ARM64::LDRXpre;
}
}
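// Map an unindexed load/store opcode to its post-indexed (writeback) form.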
static unsigned getPostIndexedOpcode(unsigned Opc) {
switch (Opc) {
default:
llvm_unreachable("Opcode has no post-indexed wise equivalent!");
case ARM64::STRSui: return ARM64::STRSpost;
case ARM64::STRDui: return ARM64::STRDpost;
case ARM64::STRQui: return ARM64::STRQpost;
case ARM64::STRWui: return ARM64::STRWpost;
case ARM64::STRXui: return ARM64::STRXpost;
case ARM64::LDRSui: return ARM64::LDRSpost;
case ARM64::LDRDui: return ARM64::LDRDpost;
case ARM64::LDRQui: return ARM64::LDRQpost;
case ARM64::LDRWui: return ARM64::LDRWpost;
case ARM64::LDRXui: return ARM64::LDRXpost;
}
}
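// Merge the load/store at I with the one at Paired into a single paired
// instruction. The new instruction is inserted at Paired when mergeForward
// is set and at I otherwise. Returns an iterator to the instruction after I,
// skipping Paired if it immediately follows.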
MachineBasicBlock::iterator
ARM64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Paired,
bool mergeForward) {
MachineBasicBlock::iterator NextI = I;
++NextI;
if (NextI == Paired)
++NextI;
bool IsUnscaled = isUnscaledLdst(I->getOpcode());
int OffsetStride = IsUnscaled && EnableARM64UnscaledMemOp ? getMemSize(I) : 1;
unsigned NewOpc = getMatchingPairOpcode(I->getOpcode());
MachineBasicBlock::iterator InsertionPoint = mergeForward ? Paired : I;
MachineOperand &BaseRegOp = mergeForward ? Paired->getOperand(1) :
I->getOperand(1);
MachineInstr *RtMI, *Rt2MI;
if (I->getOperand(2).getImm() ==
Paired->getOperand(2).getImm() + OffsetStride) {
RtMI = Paired;
Rt2MI = I;
} else {
RtMI = I;
Rt2MI = Paired;
}
int OffsetImm = RtMI->getOperand(2).getImm();
if (IsUnscaled && EnableARM64UnscaledMemOp)
OffsetImm /= OffsetStride;
MachineInstrBuilder MIB = BuildMI(*I->getParent(), InsertionPoint,
I->getDebugLoc(),
TII->get(NewOpc))
.addOperand(RtMI->getOperand(0))
.addOperand(Rt2MI->getOperand(0))
.addOperand(BaseRegOp)
.addImm(OffsetImm);
(void)MIB;
DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n ");
DEBUG(I->print(dbgs()));
DEBUG(dbgs() << " ");
DEBUG(Paired->print(dbgs()));
DEBUG(dbgs() << " with instruction:\n ");
DEBUG(((MachineInstr*)MIB)->print(dbgs()));
DEBUG(dbgs() << "\n");
I->eraseFromParent();
Paired->eraseFromParent();
return NextI;
}
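// Record every register modified or used by MI (including aliases and
// registers clobbered by regmask operands) in ModifiedRegs/UsedRegs.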
static void trackRegDefsUses(MachineInstr *MI,
BitVector &ModifiedRegs, BitVector &UsedRegs,
const TargetRegisterInfo *TRI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isRegMask())
ModifiedRegs.setBitsNotInMask(MO.getRegMask());
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (MO.isDef()) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
ModifiedRegs.set(*AI);
} else {
assert (MO.isUse() && "Reg operand not a def and not a use?!?");
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
UsedRegs.set(*AI);
}
}
}
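// Return true if Offset fits in the signed 7-bit scaled immediate of a
// paired load/store; for unscaled instructions the offset is first divided
// by OffsetStride.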
static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
if (!IsUnscaled && (Offset > 63 || Offset < -64))
return false;
if (IsUnscaled) {
int elemOffset = Offset/OffsetStride;
if (elemOffset > 63 || elemOffset < -64) return false;
}
return true;
}
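// Round Num up to the next multiple of PowOf2 (which must be a power of two).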
static int alignTo(int Num, int PowOf2) {
return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
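// Scan the instructions following I for a load/store that can be paired with
// I. At most Limit instructions are inspected; on success, mergeForward tells
// the caller whether the pair should be inserted at the matching instruction
// (true) or at I (false).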
MachineBasicBlock::iterator
ARM64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
bool &mergeForward,
unsigned Limit) {
MachineBasicBlock::iterator E = I->getParent()->end();
MachineBasicBlock::iterator MBBI = I;
MachineInstr *FirstMI = I;
++MBBI;
int Opc = FirstMI->getOpcode();
bool mayLoad = FirstMI->mayLoad();
bool IsUnscaled = isUnscaledLdst(Opc);
unsigned Reg = FirstMI->getOperand(0).getReg();
unsigned BaseReg = FirstMI->getOperand(1).getReg();
int Offset = FirstMI->getOperand(2).getImm();
if (FirstMI->modifiesRegister(BaseReg, TRI))
return E;
int OffsetStride =
IsUnscaled && EnableARM64UnscaledMemOp ? getMemSize(FirstMI) : 1;
if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
return E;
BitVector ModifiedRegs, UsedRegs;
ModifiedRegs.resize(TRI->getNumRegs());
UsedRegs.resize(TRI->getNumRegs());
for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
MachineInstr *MI = MBBI;
if (MI->isDebugValue())
continue;
++Count;
if (Opc == MI->getOpcode() && MI->getOperand(2).isImm()) {
unsigned MIBaseReg = MI->getOperand(1).getReg();
int MIOffset = MI->getOperand(2).getImm();
if (BaseReg == MIBaseReg &&
((Offset == MIOffset + OffsetStride) ||
(Offset + OffsetStride == MIOffset))) {
int MinOffset = Offset < MIOffset ? Offset : MIOffset;
if (MI->hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
return E;
bool MIIsUnscaled = isUnscaledLdst(MI->getOpcode());
if (!inBoundsForPair(MIIsUnscaled, MinOffset, OffsetStride)) {
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
continue;
}
if (IsUnscaled && EnableARM64UnscaledMemOp &&
(alignTo(MinOffset, OffsetStride) != MinOffset)) {
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
continue;
}
if (mayLoad && Reg == MI->getOperand(0).getReg()) {
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
continue;
}
if (!ModifiedRegs[MI->getOperand(0).getReg()] &&
!UsedRegs[MI->getOperand(0).getReg()]) {
mergeForward = false;
return MBBI;
}
if (!ModifiedRegs[FirstMI->getOperand(0).getReg()] &&
!UsedRegs[FirstMI->getOperand(0).getReg()]) {
mergeForward = true;
return MBBI;
}
}
}
if (MI->mayStore() || MI->isCall())
return E;
if (FirstMI->mayStore() && MI->mayLoad())
return E;
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
if (ModifiedRegs[BaseReg])
return E;
}
return E;
}
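// Fold the ADDXri/SUBXri at Update into the load/store at I, rewriting it as
// a pre-indexed (writeback) instruction, and erase both originals.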
MachineBasicBlock::iterator
ARM64LoadStoreOpt::mergePreIdxUpdateInsn(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Update) {
assert ((Update->getOpcode() == ARM64::ADDXri ||
Update->getOpcode() == ARM64::SUBXri) &&
"Unexpected base register update instruction to merge!");
MachineBasicBlock::iterator NextI = I;
if (++NextI == Update)
++NextI;
int Value = Update->getOperand(2).getImm();
assert(ARM64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
"Can't merge 1 << 12 offset into pre-indexed load / store");
if (Update->getOpcode() == ARM64::SUBXri)
Value = -Value;
unsigned NewOpc = getPreIndexedOpcode(I->getOpcode());
MachineInstrBuilder MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(),
TII->get(NewOpc))
.addOperand(I->getOperand(0))
.addOperand(I->getOperand(1))
.addImm(Value);
(void)MIB;
DEBUG(dbgs() << "Creating pre-indexed load/store.");
DEBUG(dbgs() << " Replacing instructions:\n ");
DEBUG(I->print(dbgs()));
DEBUG(dbgs() << " ");
DEBUG(Update->print(dbgs()));
DEBUG(dbgs() << " with instruction:\n ");
DEBUG(((MachineInstr*)MIB)->print(dbgs()));
DEBUG(dbgs() << "\n");
I->eraseFromParent();
Update->eraseFromParent();
return NextI;
}
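// Fold the ADDXri/SUBXri at Update into the load/store at I, rewriting it as
// a post-indexed (writeback) instruction, and erase both originals.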
MachineBasicBlock::iterator
ARM64LoadStoreOpt::mergePostIdxUpdateInsn(MachineBasicBlock::iterator I,
MachineBasicBlock::iterator Update) {
assert ((Update->getOpcode() == ARM64::ADDXri ||
Update->getOpcode() == ARM64::SUBXri) &&
"Unexpected base register update instruction to merge!");
MachineBasicBlock::iterator NextI = I;
if (++NextI == Update)
++NextI;
int Value = Update->getOperand(2).getImm();
assert(ARM64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
"Can't merge 1 << 12 offset into post-indexed load / store");
if (Update->getOpcode() == ARM64::SUBXri)
Value = -Value;
unsigned NewOpc = getPostIndexedOpcode(I->getOpcode());
MachineInstrBuilder MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(),
TII->get(NewOpc))
.addOperand(I->getOperand(0))
.addOperand(I->getOperand(1))
.addImm(Value);
(void)MIB;
DEBUG(dbgs() << "Creating post-indexed load/store.");
DEBUG(dbgs() << " Replacing instructions:\n ");
DEBUG(I->print(dbgs()));
DEBUG(dbgs() << " ");
DEBUG(Update->print(dbgs()));
DEBUG(dbgs() << " with instruction:\n ");
DEBUG(((MachineInstr*)MIB)->print(dbgs()));
DEBUG(dbgs() << "\n");
I->eraseFromParent();
Update->eraseFromParent();
return NextI;
}
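// Return true if MI is an ADDXri/SUBXri of BaseReg into BaseReg by an
// unshifted immediate in [-256, 255] that matches Offset after accounting
// for the sign of a SUB (or any such immediate when Offset is zero).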
static bool isMatchingUpdateInsn(MachineInstr *MI, unsigned BaseReg,
int Offset) {
switch (MI->getOpcode()) {
default:
break;
case ARM64::SUBXri:
Offset *= -1;
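// Negate the offset for a SUB and fall through to the shared ADDXri checks.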
case ARM64::ADDXri:
if (!MI->getOperand(2).isImm())
break;
if (ARM64_AM::getShiftValue(MI->getOperand(3).getImm()))
break;
if (MI->getOperand(0).getReg() == BaseReg &&
MI->getOperand(1).getReg() == BaseReg &&
MI->getOperand(2).getImm() <= 255 &&
MI->getOperand(2).getImm() >= -256) {
if (!Offset || Offset == MI->getOperand(2).getImm())
return true;
}
break;
}
return false;
}
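// Scan forward from the load/store at I for an add/sub of its base register
// that can be folded as a post-index update of Value bytes. At most Limit
// instructions are inspected; returns end() if no match is found before the
// base register is modified or used.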
MachineBasicBlock::iterator
ARM64LoadStoreOpt::findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
unsigned Limit, int Value) {
MachineBasicBlock::iterator E = I->getParent()->end();
MachineInstr *MemMI = I;
MachineBasicBlock::iterator MBBI = I;
const MachineFunction &MF = *MemMI->getParent()->getParent();
unsigned DestReg = MemMI->getOperand(0).getReg();
unsigned BaseReg = MemMI->getOperand(1).getReg();
int Offset = MemMI->getOperand(2).getImm() *
TII->getRegClass(MemMI->getDesc(), 0, TRI, MF)->getSize();
if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
return E;
if (Offset != Value)
return E;
BitVector ModifiedRegs, UsedRegs;
ModifiedRegs.resize(TRI->getNumRegs());
UsedRegs.resize(TRI->getNumRegs());
++MBBI;
for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
MachineInstr *MI = MBBI;
if (MI->isDebugValue())
continue;
++Count;
if (isMatchingUpdateInsn(MI, BaseReg, Value))
return MBBI;
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
return E;
}
return E;
}
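// Scan backward from the load/store at I for an add/sub of its base register
// by the access size that can be folded as a pre-index update. At most Limit
// instructions are inspected; returns end() if no match is found before the
// base register is modified or used.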
MachineBasicBlock::iterator
ARM64LoadStoreOpt::findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I,
unsigned Limit) {
MachineBasicBlock::iterator B = I->getParent()->begin();
MachineBasicBlock::iterator E = I->getParent()->end();
MachineInstr *MemMI = I;
MachineBasicBlock::iterator MBBI = I;
const MachineFunction &MF = *MemMI->getParent()->getParent();
unsigned DestReg = MemMI->getOperand(0).getReg();
unsigned BaseReg = MemMI->getOperand(1).getReg();
int Offset = MemMI->getOperand(2).getImm();
unsigned RegSize = TII->getRegClass(MemMI->getDesc(), 0, TRI, MF)->getSize();
if (MBBI == B || Offset != 0)
return E;
if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
return E;
BitVector ModifiedRegs, UsedRegs;
ModifiedRegs.resize(TRI->getNumRegs());
UsedRegs.resize(TRI->getNumRegs());
--MBBI;
for (unsigned Count = 0; MBBI != B && Count < Limit; --MBBI) {
MachineInstr *MI = MBBI;
if (MI->isDebugValue())
continue;
++Count;
if (isMatchingUpdateInsn(MI, BaseReg, RegSize))
return MBBI;
trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
return E;
}
return E;
}
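// Optimize a single basic block: first merge adjacent loads/stores into
// paired instructions, then fold base-register updates into pre-/post-indexed
// forms.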
bool ARM64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB) {
bool Modified = false;
for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
MBBI != E;) {
MachineInstr *MI = MBBI;
switch (MI->getOpcode()) {
default:
++MBBI;
break;
case ARM64::STRSui:
case ARM64::STRDui:
case ARM64::STRQui:
case ARM64::STRXui:
case ARM64::STRWui:
case ARM64::LDRSui:
case ARM64::LDRDui:
case ARM64::LDRQui:
case ARM64::LDRXui:
case ARM64::LDRWui:
case ARM64::STURSi:
case ARM64::STURDi:
case ARM64::STURQi:
case ARM64::STURWi:
case ARM64::STURXi:
case ARM64::LDURSi:
case ARM64::LDURDi:
case ARM64::LDURQi:
case ARM64::LDURWi:
case ARM64::LDURXi: {
if (MI->hasOrderedMemoryRef()) {
++MBBI;
break;
}
if (!MI->getOperand(2).isImm()) {
++MBBI;
break;
}
if (TII->isLdStPairSuppressed(MI)) {
++MBBI;
break;
}
bool mergeForward = false;
MachineBasicBlock::iterator Paired =
findMatchingInsn(MBBI, mergeForward, ScanLimit);
if (Paired != E) {
// Update the statistics before merging; mergePairedInsns erases MI.
++NumPairCreated;
if (isUnscaledLdst(MI->getOpcode()))
++NumUnscaledPairCreated;
MBBI = mergePairedInsns(MBBI, Paired, mergeForward);
Modified = true;
break;
}
++MBBI;
break;
}
}
}
for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
MBBI != E;) {
MachineInstr *MI = MBBI;
int Opc = MI->getOpcode();
switch (Opc) {
default:
++MBBI;
break;
case ARM64::STRSui:
case ARM64::STRDui:
case ARM64::STRQui:
case ARM64::STRXui:
case ARM64::STRWui:
case ARM64::LDRSui:
case ARM64::LDRDui:
case ARM64::LDRQui:
case ARM64::LDRXui:
case ARM64::LDRWui:
case ARM64::STURSi:
case ARM64::STURDi:
case ARM64::STURQi:
case ARM64::STURWi:
case ARM64::STURXi:
case ARM64::LDURSi:
case ARM64::LDURDi:
case ARM64::LDURQi:
case ARM64::LDURWi:
case ARM64::LDURXi: {
if (!MI->getOperand(2).isImm()) {
++MBBI;
break;
}
MachineBasicBlock::iterator Update =
findMatchingUpdateInsnForward(MBBI, ScanLimit, 0);
if (Update != E) {
MBBI = mergePostIdxUpdateInsn(MBBI, Update);
Modified = true;
++NumPostFolded;
break;
}
if (isUnscaledLdst(Opc)) {
++MBBI;
break;
}
Update = findMatchingUpdateInsnBackward(MBBI, ScanLimit);
if (Update != E) {
MBBI = mergePreIdxUpdateInsn(MBBI, Update);
Modified = true;
++NumPreFolded;
break;
}
int Value = MI->getOperand(2).getImm() *
TII->getRegClass(MI->getDesc(), 0, TRI, *(MBB.getParent()))->getSize();
Update = findMatchingUpdateInsnForward(MBBI, ScanLimit, Value);
if (Update != E) {
MBBI = mergePreIdxUpdateInsn(MBBI, Update);
Modified = true;
++NumPreFolded;
break;
}
++MBBI;
break;
}
}
}
return Modified;
}
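// Entry point: run the optimization over every basic block in the function.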
bool ARM64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
if (!DoLoadStoreOpt)
return false;
const TargetMachine &TM = Fn.getTarget();
TII = static_cast<const ARM64InstrInfo*>(TM.getInstrInfo());
TRI = TM.getRegisterInfo();
bool Modified = false;
for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
++MFI) {
MachineBasicBlock &MBB = *MFI;
Modified |= optimizeBlock(MBB);
}
return Modified;
}
FunctionPass *llvm::createARM64LoadStoreOptimizationPass() {
return new ARM64LoadStoreOpt();
}