//===-- ARM64AdvSIMDScalarPass.cpp - AdvSIMD scalar unit usage ------------===//
#define DEBUG_TYPE "arm64-simd-scalar"
#include "ARM64.h"
#include "ARM64InstrInfo.h"
#include "ARM64RegisterInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
// Off by default; enabled explicitly with -arm64-simd-scalar.
static cl::opt<bool>
AdvSIMDScalar("arm64-simd-scalar",
cl::desc("enable use of AdvSIMD scalar integer instructions"),
cl::init(false), cl::Hidden);
// Testing/tuning aid: bypass the profitability heuristic and transform every
// candidate instruction (see isProfitableToTransform).
static cl::opt<bool>
TransformAll("arm64-simd-scalar-force-all",
cl::desc("Force use of AdvSIMD scalar instructions everywhere"),
cl::init(false), cl::Hidden);
// Pass statistics, printed with -stats.
STATISTIC(NumScalarInsnsUsed, "Number of scalar instructions used");
STATISTIC(NumCopiesDeleted, "Number of cross-class copies deleted");
STATISTIC(NumCopiesInserted, "Number of cross-class copies inserted");
namespace {
// Machine-function pass that rewrites eligible 64-bit GPR integer operations
// (currently ADDXrr/SUBXrr — see getTransformOpcode) into their AdvSIMD
// scalar equivalents when the cross-register-bank copies saved outweigh the
// copies inserted.
class ARM64AdvSIMDScalar : public MachineFunctionPass {
// Cached per-function state, set in runOnMachineFunction.
MachineRegisterInfo *MRI;
const ARM64InstrInfo *TII;
private:
// Heuristic: is rewriting MI to its AdvSIMD scalar form a net win in copies?
bool isProfitableToTransform(const MachineInstr *MI) const;
// Perform the rewrite of a single instruction (deletes MI).
void transformInstruction(MachineInstr *MI);
// Scan one block; returns true if anything was changed.
bool processMachineBasicBlock(MachineBasicBlock *MBB);
public:
static char ID; explicit ARM64AdvSIMDScalar() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &F);
const char *getPassName() const {
return "AdvSIMD scalar operation optimization";
}
// Instructions are only rewritten in place, so the CFG is untouched.
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
char ARM64AdvSIMDScalar::ID = 0;
}
// Return true if \p Reg, referenced with subregister index \p SubReg, names a
// full GPR64. A subregister reference never qualifies.
static bool isGPR64(unsigned Reg, unsigned SubReg,
                    const MachineRegisterInfo *MRI) {
  if (SubReg != 0)
    return false;
  // Physical registers are checked directly against class membership;
  // virtual registers via their assigned register class.
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return ARM64::GPR64RegClass.contains(Reg);
  return MRI->getRegClass(Reg)->hasSuperClassEq(&ARM64::GPR64RegClass);
}
static bool isFPR64(unsigned Reg, unsigned SubReg,
const MachineRegisterInfo *MRI) {
if (TargetRegisterInfo::isVirtualRegister(Reg))
return (MRI->getRegClass(Reg)->hasSuperClassEq(&ARM64::FPR64RegClass) &&
SubReg == 0) ||
(MRI->getRegClass(Reg)->hasSuperClassEq(&ARM64::FPR128RegClass) &&
SubReg == ARM64::dsub);
return (ARM64::FPR64RegClass.contains(Reg) && SubReg == 0) ||
(ARM64::FPR128RegClass.contains(Reg) && SubReg == ARM64::dsub);
}
// If \p MI is a cross-register-bank copy (an FMOV, a lane-zero UMOV, or a
// generic COPY between GPR64 and FPR64), return its source register and set
// \p SubReg to the subregister index needed to reference the FP side
// (ARM64::dsub when the value lives in a vector register). Return 0 when MI
// is not such a copy.
static unsigned getSrcFromCopy(const MachineInstr *MI,
                               const MachineRegisterInfo *MRI,
                               unsigned &SubReg) {
  SubReg = 0;
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::FMOVDXr:
  case ARM64::FMOVXDr:
    // Direct FP<->GPR moves copy the full 64 bits; no subreg needed.
    return MI->getOperand(1).getReg();
  case ARM64::UMOVvi64:
    // Extracting lane zero of a vector is a copy of its dsub lane.
    if (MI->getOperand(2).getImm() == 0) {
      SubReg = ARM64::dsub;
      return MI->getOperand(1).getReg();
    }
    break;
  case ARM64::COPY: {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned DstSub = MI->getOperand(0).getSubReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    unsigned SrcSub = MI->getOperand(1).getSubReg();
    // GPR -> FPR copy: the GPR source is usable as-is.
    if (isFPR64(DstReg, DstSub, MRI) && isGPR64(SrcReg, SrcSub, MRI))
      return SrcReg;
    // FPR -> GPR copy: refer to the FP source through its dsub lane.
    if (isGPR64(DstReg, DstSub, MRI) && isFPR64(SrcReg, SrcSub, MRI)) {
      SubReg = ARM64::dsub;
      return SrcReg;
    }
    break;
  }
  }
  return 0;
}
// Map a GPR opcode to its AdvSIMD scalar equivalent. Opcodes without an
// equivalent map to themselves (callers detect this via isTransformable).
static int getTransformOpcode(unsigned Opc) {
  if (Opc == ARM64::ADDXrr)
    return ARM64::ADDv1i64;
  if (Opc == ARM64::SUBXrr)
    return ARM64::SUBv1i64;
  return Opc;
}
// An instruction is transformable iff the opcode map yields a different
// opcode for it.
static bool isTransformable(const MachineInstr *MI) {
  const int Opc = MI->getOpcode();
  return getTransformOpcode(Opc) != Opc;
}
// Heuristic: decide whether rewriting MI to its AdvSIMD scalar form pays for
// itself. A transform costs at most three new cross-bank copies (one per
// source operand plus one for the result); each cost is discounted when an
// existing cross-bank copy feeding or consuming MI can be reused, and the
// reused copies that become dead count as savings.
bool ARM64AdvSIMDScalar::isProfitableToTransform(const MachineInstr *MI) const {
if (!isTransformable(MI))
return false;
// Worst case: one new copy per source operand plus one for the result.
unsigned NumNewCopies = 3;
unsigned NumRemovableCopies = 0;
unsigned OrigSrc0 = MI->getOperand(1).getReg();
unsigned OrigSrc1 = MI->getOperand(2).getReg();
// SubReg0/SubReg1 are required by getSrcFromCopy's signature but unused
// here; transformInstruction recomputes and consumes them.
unsigned Src0 = 0, SubReg0;
unsigned Src1 = 0, SubReg1;
// If operand 0 is defined by a cross-bank copy, its source can be used
// directly (one fewer new copy); if MI is the copy's only non-debug user,
// the copy itself becomes deletable.
if (!MRI->def_empty(OrigSrc0)) {
MachineRegisterInfo::def_iterator Def = MRI->def_begin(OrigSrc0);
assert(llvm::next(Def) == MRI->def_end() && "Multiple def in SSA!");
Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
if (Src0)
--NumNewCopies;
if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0))
++NumRemovableCopies;
}
// Same analysis for the second source operand.
if (!MRI->def_empty(OrigSrc1)) {
MachineRegisterInfo::def_iterator Def = MRI->def_begin(OrigSrc1);
assert(llvm::next(Def) == MRI->def_end() && "Multiple def in SSA!");
Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
if (Src1)
--NumNewCopies;
if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1))
++NumRemovableCopies;
}
unsigned Dst = MI->getOperand(0).getReg();
bool AllUsesAreCopies = true;
// Walk the result's users: copies back to GPR (or instructions we would
// also transform) become removable; INSERT_SUBREG/INSvi64gpr users neither
// help nor hurt; anything else means the result-side copy stays.
for (MachineRegisterInfo::use_nodbg_iterator Use = MRI->use_nodbg_begin(Dst),
E = MRI->use_nodbg_end(); Use != E; ++Use) {
unsigned SubReg;
if (getSrcFromCopy(&*Use, MRI, SubReg) || isTransformable(&*Use))
++NumRemovableCopies;
else if (Use->getOpcode() == ARM64::INSERT_SUBREG ||
Use->getOpcode() == ARM64::INSvi64gpr)
;
else
AllUsesAreCopies = false;
}
// If every use tolerates an FPR result, the result copy-out is free.
if (AllUsesAreCopies)
--NumNewCopies;
// Profitable when at least as many copies are removed as created.
if (NumNewCopies <= NumRemovableCopies)
return true;
// Otherwise transform only under the force-all testing flag.
return TransformAll;
}
// Emit a COPY from \p Src to \p Dst immediately before \p MI, propagating
// MI's debug location. \p IsKill sets the kill flag on the source operand.
// Bumps the inserted-copy statistic and returns the new instruction.
static MachineInstr *insertCopy(const ARM64InstrInfo *TII, MachineInstr *MI,
                                unsigned Dst, unsigned Src, bool IsKill) {
  MachineBasicBlock &MBB = *MI->getParent();
  MachineInstrBuilder Copy =
      BuildMI(MBB, MI, MI->getDebugLoc(), TII->get(ARM64::COPY), Dst)
          .addReg(Src, getKillRegState(IsKill));
  DEBUG(dbgs() << " adding copy: " << *Copy);
  ++NumCopiesInserted;
  return Copy;
}
// Rewrite MI (already known transformable) to its AdvSIMD scalar equivalent:
// reuse or insert GPR->FPR64 copies for the two sources, emit the new
// vector-unit opcode into a fresh FPR64 vreg, then copy the result back to
// MI's original destination. MI is erased.
void ARM64AdvSIMDScalar::transformInstruction(MachineInstr *MI) {
DEBUG(dbgs() << "Scalar transform: " << *MI);
MachineBasicBlock *MBB = MI->getParent();
int OldOpc = MI->getOpcode();
int NewOpc = getTransformOpcode(OldOpc);
assert(OldOpc != NewOpc && "transform an instruction to itself?!");
unsigned OrigSrc0 = MI->getOperand(1).getReg();
unsigned OrigSrc1 = MI->getOperand(2).getReg();
unsigned Src0 = 0, SubReg0;
unsigned Src1 = 0, SubReg1;
// If source 0 comes from a cross-bank copy, bypass it: Src0/SubReg0 now
// name the copy's own source. Delete the copy when MI was its only
// non-debug user.
if (!MRI->def_empty(OrigSrc0)) {
MachineRegisterInfo::def_iterator Def = MRI->def_begin(OrigSrc0);
assert(llvm::next(Def) == MRI->def_end() && "Multiple def in SSA!");
Src0 = getSrcFromCopy(&*Def, MRI, SubReg0);
if (Src0 && MRI->hasOneNonDBGUse(OrigSrc0)) {
assert(Src0 && "Can't delete copy w/o a valid original source!");
Def->eraseFromParent();
++NumCopiesDeleted;
}
}
// Same for source 1.
if (!MRI->def_empty(OrigSrc1)) {
MachineRegisterInfo::def_iterator Def = MRI->def_begin(OrigSrc1);
assert(llvm::next(Def) == MRI->def_end() && "Multiple def in SSA!");
Src1 = getSrcFromCopy(&*Def, MRI, SubReg1);
if (Src1 && MRI->hasOneNonDBGUse(OrigSrc1)) {
assert(Src1 && "Can't delete copy w/o a valid original source!");
Def->eraseFromParent();
++NumCopiesDeleted;
}
}
// No reusable copy found: materialize a fresh FPR64 vreg and copy into it
// right before MI.
if (!Src0) {
SubReg0 = 0;
Src0 = MRI->createVirtualRegister(&ARM64::FPR64RegClass);
insertCopy(TII, MI, Src0, OrigSrc0, true);
}
if (!Src1) {
SubReg1 = 0;
Src1 = MRI->createVirtualRegister(&ARM64::FPR64RegClass);
insertCopy(TII, MI, Src1, OrigSrc1, true);
}
// Build the replacement on the FPR64 bank, then copy the result back to
// MI's original destination register.
// NOTE(review): both sources are marked killed unconditionally; when a
// bypassed copy was NOT deleted, its result may still have other uses, so
// this kill flag looks suspect — confirm against the machine verifier.
unsigned Dst = MRI->createVirtualRegister(&ARM64::FPR64RegClass);
BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(NewOpc), Dst)
.addReg(Src0, getKillRegState(true), SubReg0)
.addReg(Src1, getKillRegState(true), SubReg1);
insertCopy(TII, MI, MI->getOperand(0).getReg(), Dst, true);
MI->eraseFromParent();
++NumScalarInsnsUsed;
}
// Scan one basic block, transforming every instruction the heuristic
// approves. Returns true if the block was modified.
bool ARM64AdvSIMDScalar::processMachineBasicBlock(MachineBasicBlock *MBB) {
  bool Modified = false;
  for (MachineBasicBlock::iterator MII = MBB->begin(), MIE = MBB->end();
       MII != MIE;) {
    // Advance before transforming: transformInstruction erases MI.
    MachineInstr *MI = MII;
    ++MII;
    if (!isProfitableToTransform(MI))
      continue;
    transformInstruction(MI);
    Modified = true;
  }
  return Modified;
}
bool ARM64AdvSIMDScalar::runOnMachineFunction(MachineFunction &mf) {
if (!AdvSIMDScalar)
return false;
bool Changed = false;
DEBUG(dbgs() << "***** ARM64AdvSIMDScalar *****\n");
const TargetMachine &TM = mf.getTarget();
MRI = &mf.getRegInfo();
TII = static_cast<const ARM64InstrInfo*>(TM.getInstrInfo());
for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I)
if (processMachineBasicBlock(I))
Changed = true;
return Changed;
}
// Factory used by the ARM64 target to add this pass to the codegen pipeline.
FunctionPass *llvm::createARM64AdvSIMDScalar() {
  ARM64AdvSIMDScalar *Pass = new ARM64AdvSIMDScalar();
  return Pass;
}