X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FARM%2FARMBaseInstrInfo.cpp;h=24cd228279c38a7c576a48c8ea40600a35a26d08;hb=90b7b12f012d9234488277a323231e0b7a8d12ac;hp=32570650086514517dbc29c2ccd90c17cc1d441f;hpb=40a5eb18b031fa1a5e9697e21e251e613d441cc5;p=oota-llvm.git diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 32570650086..24cd228279c 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -13,11 +13,11 @@ #include "ARMBaseInstrInfo.h" #include "ARM.h" -#include "ARMAddressingModes.h" #include "ARMConstantPoolValue.h" +#include "ARMHazardRecognizer.h" #include "ARMMachineFunctionInfo.h" #include "ARMRegisterInfo.h" -#include "ARMGenInstrInfo.inc" +#include "MCTargetDesc/ARMAddressingModes.h" #include "llvm/Constants.h" #include "llvm/Function.h" #include "llvm/GlobalValue.h" @@ -29,11 +29,17 @@ #include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/PseudoSourceValue.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/MC/MCAsmInfo.h" +#include "llvm/Support/BranchProbability.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/ADT/STLExtras.h" + +#define GET_INSTRINFO_CTOR +#include "ARMGenInstrInfo.inc" + using namespace llvm; static cl::opt @@ -41,12 +47,71 @@ EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden, cl::desc("Enable ARM 2-addr to 3-addr conv")); static cl::opt -OldARMIfCvt("old-arm-ifcvt", cl::Hidden, - cl::desc("Use old-style ARM if-conversion heuristics")); +WidenVMOVS("widen-vmovs", cl::Hidden, + cl::desc("Widen ARM vmovs to vmovd when possible")); + +/// ARM_MLxEntry - Record information about MLA / MLS instructions. +struct ARM_MLxEntry { + unsigned MLxOpc; // MLA / MLS opcode + unsigned MulOpc; // Expanded multiplication opcode + unsigned AddSubOpc; // Expanded add / sub opcode + bool NegAcc; // True if the acc is negated before the add / sub. + bool HasLane; // True if instruction has an extra "lane" operand. 
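  // As an informal sketch of how a table row is used (exact operand order is
  // defined by the .td files): the scalar multiply-accumulate
  //   VMLAS Sd, Sacc, Sn, Sm      ; Sd = Sacc + Sn * Sm
  // can be expanded into the two-instruction sequence
  //   VMULS Stmp, Sn, Sm
  //   VADDS Sd, Sacc, Stmp
  // NegAcc marks the VNML* rows, where the accumulator is negated before the
  // add / sub, and HasLane marks the by-scalar (lane-indexed) variants.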
+}; + +static const ARM_MLxEntry ARM_MLxTable[] = { + // MLxOpc, MulOpc, AddSubOpc, NegAcc, HasLane + // fp scalar ops + { ARM::VMLAS, ARM::VMULS, ARM::VADDS, false, false }, + { ARM::VMLSS, ARM::VMULS, ARM::VSUBS, false, false }, + { ARM::VMLAD, ARM::VMULD, ARM::VADDD, false, false }, + { ARM::VMLSD, ARM::VMULD, ARM::VSUBD, false, false }, + { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS, true, false }, + { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS, true, false }, + { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD, true, false }, + { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD, true, false }, + + // fp SIMD ops + { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd, false, false }, + { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd, false, false }, + { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq, false, false }, + { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq, false, false }, + { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd, false, true }, + { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd, false, true }, + { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq, false, true }, + { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq, false, true }, +}; ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI) - : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)), + : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP), Subtarget(STI) { + for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) { + if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second) + assert(false && "Duplicated entries?"); + MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc); + MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc); + } +} + +// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl +// currently defaults to no prepass hazard recognizer. +ScheduleHazardRecognizer *ARMBaseInstrInfo:: +CreateTargetHazardRecognizer(const TargetMachine *TM, + const ScheduleDAG *DAG) const { + if (usePreRAHazardRecognizer()) { + const InstrItineraryData *II = TM->getInstrItineraryData(); + return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched"); + } + return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG); +} + +ScheduleHazardRecognizer *ARMBaseInstrInfo:: +CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, + const ScheduleDAG *DAG) const { + if (Subtarget.isThumb2() || Subtarget.hasVFP2()) + return (ScheduleHazardRecognizer *) + new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG); + return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG); } MachineInstr * @@ -80,9 +145,9 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineInstr *UpdateMI = NULL; MachineInstr *MemMI = NULL; unsigned AddrMode = (TSFlags & ARMII::AddrModeMask); - const TargetInstrDesc &TID = MI->getDesc(); - unsigned NumOps = TID.getNumOperands(); - bool isLoad = !TID.mayStore(); + const MCInstrDesc &MCID = MI->getDesc(); + unsigned NumOps = MCID.getNumOperands(); + bool isLoad = !MCID.mayStore(); const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0); const MachineOperand &Base = MI->getOperand(2); const MachineOperand &Offset = MI->getOperand(NumOps-3); @@ -111,7 +176,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm); unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt); UpdateMI = BuildMI(MF, MI->getDebugLoc(), - get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg) + get(isSub ? 
ARM::SUBrsi : ARM::ADDrsi), WBReg) .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc) .addImm(Pred).addReg(0).addReg(0); } else @@ -170,8 +235,7 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (LV) { for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { MachineOperand &MO = MI->getOperand(i); - if (MO.isReg() && MO.getReg() && - TargetRegisterInfo::isVirtualRegister(MO.getReg())) { + if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) { unsigned Reg = MO.getReg(); LiveVariables::VarInfo &VI = LV->getVarInfo(Reg); @@ -201,43 +265,6 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, return NewMIs[0]; } -bool -ARMBaseInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI, - const std::vector &CSI, - const TargetRegisterInfo *TRI) const { - if (CSI.empty()) - return false; - - DebugLoc DL; - if (MI != MBB.end()) DL = MI->getDebugLoc(); - - for (unsigned i = 0, e = CSI.size(); i != e; ++i) { - unsigned Reg = CSI[i].getReg(); - bool isKill = true; - - // Add the callee-saved register as live-in unless it's LR and - // @llvm.returnaddress is called. If LR is returned for @llvm.returnaddress - // then it's already added to the function and entry block live-in sets. - if (Reg == ARM::LR) { - MachineFunction &MF = *MBB.getParent(); - if (MF.getFrameInfo()->isReturnAddressTaken() && - MF.getRegInfo().isLiveIn(Reg)) - isKill = false; - } - - if (isKill) - MBB.addLiveIn(Reg); - - // Insert the spill to the stack frame. The register is killed at the spill - // - const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); - storeRegToStackSlot(MBB, MI, Reg, isKill, - CSI[i].getFrameIdx(), RC, TRI); - } - return true; -} - // Branch analysis. bool ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, @@ -377,6 +404,7 @@ ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB); int BccOpc = !AFI->isThumbFunction() ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc); + bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function(); // Shouldn't be a fall through. assert(TBB && "InsertBranch must not be told to insert a fallthrough"); @@ -384,9 +412,12 @@ ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, "ARM branch conditions have two components!"); if (FBB == 0) { - if (Cond.empty()) // Unconditional branch? - BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB); - else + if (Cond.empty()) { // Unconditional branch? + if (isThumb) + BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0); + else + BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB); + } else BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB) .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()); return 1; @@ -395,7 +426,10 @@ ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, // Two-way conditional branch. BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB) .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()); - BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB); + if (isThumb) + BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0); + else + BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB); return 2; } @@ -457,8 +491,8 @@ SubsumesPredicate(const SmallVectorImpl &Pred1, bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI, std::vector &Pred) const { // FIXME: This confuses implicit_def with optional CPSR def. 
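 // (Background: ARM data-processing instructions that can set flags carry a
 // trailing optional-def operand that is either %noreg or CPSR depending on
 // whether the 'S' bit is set; hasOptionalDef() is true for such
 // descriptors, which is what the scan below relies on.)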
- const TargetInstrDesc &TID = MI->getDesc(); - if (!TID.getImplicitDefs() && !TID.hasOptionalDef()) + const MCInstrDesc &MCID = MI->getDesc(); + if (!MCID.getImplicitDefs() && !MCID.hasOptionalDef()) return false; bool Found = false; @@ -477,11 +511,11 @@ bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI, /// By default, this returns true for every instruction with a /// PredicateOperand. bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const { - const TargetInstrDesc &TID = MI->getDesc(); - if (!TID.isPredicable()) + const MCInstrDesc &MCID = MI->getDesc(); + if (!MCID.isPredicable()) return false; - if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) { + if ((MCID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) { ARMFunctionInfo *AFI = MI->getParent()->getParent()->getInfo(); return AFI->isThumb2Function(); @@ -506,35 +540,28 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { const MachineFunction *MF = MBB.getParent(); const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo(); - // Basic size info comes from the TSFlags field. - const TargetInstrDesc &TID = MI->getDesc(); - uint64_t TSFlags = TID.TSFlags; + const MCInstrDesc &MCID = MI->getDesc(); + if (MCID.getSize()) + return MCID.getSize(); - unsigned Opc = MI->getOpcode(); - switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) { - default: { // If this machine instr is an inline asm, measure it. if (MI->getOpcode() == ARM::INLINEASM) return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI); if (MI->isLabel()) return 0; + unsigned Opc = MI->getOpcode(); switch (Opc) { - default: - llvm_unreachable("Unknown or unset size field for instr!"); case TargetOpcode::IMPLICIT_DEF: case TargetOpcode::KILL: case TargetOpcode::PROLOG_LABEL: case TargetOpcode::EH_LABEL: case TargetOpcode::DBG_VALUE: return 0; - } - break; - } - case ARMII::Size8Bytes: return 8; // ARM instruction x 2. - case ARMII::Size4Bytes: return 4; // ARM / Thumb2 instruction. - case ARMII::Size2Bytes: return 2; // Thumb1 instruction. - case ARMII::SizeSpecial: { - switch (Opc) { + case ARM::MOVi16_ga_pcrel: + case ARM::MOVTi16_ga_pcrel: + case ARM::t2MOVi16_ga_pcrel: + case ARM::t2MOVTi16_ga_pcrel: + return 4; case ARM::MOVi32imm: case ARM::t2MOVi32imm: return 8; @@ -558,16 +585,16 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { case ARM::BR_JTadd: case ARM::tBR_JTr: case ARM::t2BR_JT: - case ARM::t2TBB: - case ARM::t2TBH: { + case ARM::t2TBB_JT: + case ARM::t2TBH_JT: { // These are jumptable branches, i.e. a branch followed by an inlined // jumptable. The size is 4 + 4 * number of entries. For TBB, each // entry is one byte; TBH two byte each. - unsigned EntrySize = (Opc == ARM::t2TBB) - ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4); - unsigned NumOps = TID.getNumOperands(); + unsigned EntrySize = (Opc == ARM::t2TBB_JT) + ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4); + unsigned NumOps = MCID.getNumOperands(); MachineOperand JTOP = - MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2)); + MI->getOperand(NumOps - (MCID.isPredicable() ? 3 : 2)); unsigned JTI = JTOP.getIndex(); const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo(); assert(MJTI != 0); @@ -582,7 +609,7 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { // alignment issue. unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 
2 : 4; unsigned NumEntries = getNumJTEntries(JT, JTI); - if (Opc == ARM::t2TBB && (NumEntries & 1)) + if (Opc == ARM::t2TBB_JT && (NumEntries & 1)) // Make sure the instruction that follows TBB is 2-byte aligned. // FIXME: Constant island pass should insert an "ALIGN" instruction // instead. @@ -593,8 +620,6 @@ unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { // Otherwise, pseudo-instruction sizes are zero. return 0; } - } - } return 0; // Not reached } @@ -614,7 +639,7 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, bool SPRDest = ARM::SPRRegClass.contains(DestReg); bool SPRSrc = ARM::SPRRegClass.contains(SrcReg); - unsigned Opc; + unsigned Opc = 0; if (SPRDest && SPRSrc) Opc = ARM::VMOVS; else if (GPRDest && SPRSrc) @@ -624,18 +649,41 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, else if (ARM::DPRRegClass.contains(DestReg, SrcReg)) Opc = ARM::VMOVD; else if (ARM::QPRRegClass.contains(DestReg, SrcReg)) - Opc = ARM::VMOVQ; - else if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) - Opc = ARM::VMOVQQ; - else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) - Opc = ARM::VMOVQQQQ; - else - llvm_unreachable("Impossible reg-to-reg copy"); + Opc = ARM::VORRq; - MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg); - MIB.addReg(SrcReg, getKillRegState(KillSrc)); - if (Opc != ARM::VMOVQQ && Opc != ARM::VMOVQQQQ) + if (Opc) { + MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg); + MIB.addReg(SrcReg, getKillRegState(KillSrc)); + if (Opc == ARM::VORRq) + MIB.addReg(SrcReg, getKillRegState(KillSrc)); AddDefaultPred(MIB); + return; + } + + // Generate instructions for VMOVQQ and VMOVQQQQ pseudos in place. + if (ARM::QQPRRegClass.contains(DestReg, SrcReg) || + ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) { + const TargetRegisterInfo *TRI = &getRegisterInfo(); + assert(ARM::qsub_0 + 3 == ARM::qsub_3 && "Expected contiguous enum."); + unsigned EndSubReg = ARM::QQPRRegClass.contains(DestReg, SrcReg) ? + ARM::qsub_1 : ARM::qsub_3; + for (unsigned i = ARM::qsub_0, e = EndSubReg + 1; i != e; ++i) { + unsigned Dst = TRI->getSubReg(DestReg, i); + unsigned Src = TRI->getSubReg(SrcReg, i); + MachineInstrBuilder Mov = + AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VORRq)) + .addReg(Dst, RegState::Define) + .addReg(Src, getKillRegState(KillSrc)) + .addReg(Src, getKillRegState(KillSrc))); + if (i == EndSubReg) { + Mov->addRegisterDefined(DestReg, TRI); + if (KillSrc) + Mov->addRegisterKilled(SrcReg, TRI); + } + } + return; + } + llvm_unreachable("Impossible reg-to-reg copy"); } static const @@ -668,85 +716,84 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MFI.getObjectSize(FI), Align); - // tGPR is used sometimes in ARM instructions that need to avoid using - // certain registers. Just treat it as GPR here. Likewise, rGPR. 
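  // The replacement code below dispatches on the spill size instead of
  // enumerating register class IDs; roughly:
  //   size  4: GPR -> STRi12,  SPR -> VSTRS
  //   size  8: DPR -> VSTRD
  //   size 16: QPR -> VST1q64Pseudo (aligned) or VSTMQIA
  //   size 32: QQPR -> VST1d64QPseudo (aligned) or a VSTMDIA of dsub_0..dsub_3
  //   size 64: QQQQPR -> a VSTMDIA of dsub_0..dsub_7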
- if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass - || RC == ARM::rGPRRegisterClass) - RC = ARM::GPRRegisterClass; - - switch (RC->getID()) { - case ARM::GPRRegClassID: - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12)) + switch (RC->getSize()) { + case 4: + if (ARM::GPRRegClass.hasSubClassEq(RC)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12)) .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); - break; - case ARM::SPRRegClassID: - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS)) + } else if (ARM::SPRRegClass.hasSubClassEq(RC)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS)) .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); - break; - case ARM::DPRRegClassID: - case ARM::DPR_VFP2RegClassID: - case ARM::DPR_8RegClassID: - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD)) + } else + llvm_unreachable("Unknown reg class!"); + break; + case 8: + if (ARM::DPRRegClass.hasSubClassEq(RC)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD)) .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); - break; - case ARM::QPRRegClassID: - case ARM::QPR_VFP2RegClassID: - case ARM::QPR_8RegClassID: - if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64Pseudo)) + } else + llvm_unreachable("Unknown reg class!"); + break; + case 16: + if (ARM::QPRRegClass.hasSubClassEq(RC)) { + if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64Pseudo)) .addFrameIndex(FI).addImm(16) .addReg(SrcReg, getKillRegState(isKill)) .addMemOperand(MMO)); - } else { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQ)) + } else { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA)) .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI) - .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)) .addMemOperand(MMO)); - } - break; - case ARM::QQPRRegClassID: - case ARM::QQPR_VFP2RegClassID: - if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { - // FIXME: It's possible to only store part of the QQ register if the - // spilled def has a sub-register index. - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo)) + } + } else + llvm_unreachable("Unknown reg class!"); + break; + case 32: + if (ARM::QQPRRegClass.hasSubClassEq(RC)) { + if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { + // FIXME: It's possible to only store part of the QQ register if the + // spilled def has a sub-register index. 
+ AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo)) .addFrameIndex(FI).addImm(16) .addReg(SrcReg, getKillRegState(isKill)) .addMemOperand(MMO)); - } else { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD)) - .addFrameIndex(FI) - .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))) - .addMemOperand(MMO); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); - AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI); - } - break; - case ARM::QQQQPRRegClassID: { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMD)) - .addFrameIndex(FI) - .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))) - .addMemOperand(MMO); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI); - MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI); - AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI); - break; - } - default: - llvm_unreachable("Unknown regclass!"); + } else { + MachineInstrBuilder MIB = + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) + .addFrameIndex(FI)) + .addMemOperand(MMO); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); + AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI); + } + } else + llvm_unreachable("Unknown reg class!"); + break; + case 64: + if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) { + MachineInstrBuilder MIB = + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA)) + .addFrameIndex(FI)) + .addMemOperand(MMO); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI); + MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI); + AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI); + } else + llvm_unreachable("Unknown reg class!"); + break; + default: + llvm_unreachable("Unknown reg class!"); } } @@ -768,7 +815,7 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI, break; case ARM::STRi12: case ARM::t2STRi12: - case ARM::tSpill: + case ARM::tSTRspi: case ARM::VSTRD: case ARM::VSTRS: if (MI->getOperand(1).isFI() && @@ -785,10 +832,8 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI, return MI->getOperand(2).getReg(); } break; - case ARM::VSTMQ: + case ARM::VSTMQIA: if (MI->getOperand(1).isFI() && - MI->getOperand(2).isImm() && - MI->getOperand(2).getImm() == ARM_AM::getAM4ModeImm(ARM_AM::ia) && MI->getOperand(0).getSubReg() == 0) { FrameIndex = MI->getOperand(1).getIndex(); return MI->getOperand(0).getReg(); @@ -799,6 +844,12 @@ ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI, return 0; } +unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI, + int &FrameIndex) const { + const MachineMemOperand *Dummy; + return MI->getDesc().mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex); +} + void ARMBaseInstrInfo:: loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, int FI, @@ -816,75 +867,77 @@ 
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MFI.getObjectSize(FI), Align); - // tGPR is used sometimes in ARM instructions that need to avoid using - // certain registers. Just treat it as GPR here. - if (RC == ARM::tGPRRegisterClass || RC == ARM::tcGPRRegisterClass - || RC == ARM::rGPRRegisterClass) - RC = ARM::GPRRegisterClass; - - switch (RC->getID()) { - case ARM::GPRRegClassID: - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg) + switch (RC->getSize()) { + case 4: + if (ARM::GPRRegClass.hasSubClassEq(RC)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); - break; - case ARM::SPRRegClassID: - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg) + + } else if (ARM::SPRRegClass.hasSubClassEq(RC)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); + } else + llvm_unreachable("Unknown reg class!"); break; - case ARM::DPRRegClassID: - case ARM::DPR_VFP2RegClassID: - case ARM::DPR_8RegClassID: - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg) + case 8: + if (ARM::DPRRegClass.hasSubClassEq(RC)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); + } else + llvm_unreachable("Unknown reg class!"); break; - case ARM::QPRRegClassID: - case ARM::QPR_VFP2RegClassID: - case ARM::QPR_8RegClassID: - if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64Pseudo), DestReg) + case 16: + if (ARM::QPRRegClass.hasSubClassEq(RC)) { + if (Align >= 16 && getRegisterInfo().needsStackRealignment(MF)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64Pseudo), DestReg) .addFrameIndex(FI).addImm(16) .addMemOperand(MMO)); - } else { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQ), DestReg) - .addFrameIndex(FI) - .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia)) - .addMemOperand(MMO)); - } + } else { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg) + .addFrameIndex(FI) + .addMemOperand(MMO)); + } + } else + llvm_unreachable("Unknown reg class!"); break; - case ARM::QQPRRegClassID: - case ARM::QQPR_VFP2RegClassID: - if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg) + case 32: + if (ARM::QQPRRegClass.hasSubClassEq(RC)) { + if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) { + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg) .addFrameIndex(FI).addImm(16) .addMemOperand(MMO)); - } else { + } else { + MachineInstrBuilder MIB = + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) + .addFrameIndex(FI)) + .addMemOperand(MMO); + MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI); + MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI); + MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI); + MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI); + MIB.addReg(DestReg, RegState::Define | RegState::Implicit); + } + } else + llvm_unreachable("Unknown reg class!"); + break; + case 64: + if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) { MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD)) - .addFrameIndex(FI) - .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))) - .addMemOperand(MMO); + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA)) + .addFrameIndex(FI)) + .addMemOperand(MMO); MIB = AddDReg(MIB, DestReg, ARM::dsub_0, 
RegState::Define, TRI); MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI); MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI); - AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI); - } - break; - case ARM::QQQQPRRegClassID: { - MachineInstrBuilder MIB = - AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMD)) - .addFrameIndex(FI) - .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))) - .addMemOperand(MMO); - MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI); - MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI); - MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI); - MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI); - MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI); - MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI); - MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI); - AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI); + MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI); + MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI); + MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI); + MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI); + MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI); + MIB.addReg(DestReg, RegState::Define | RegState::Implicit); + } else + llvm_unreachable("Unknown reg class!"); break; - } default: llvm_unreachable("Unknown regclass!"); } @@ -908,7 +961,7 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, break; case ARM::LDRi12: case ARM::t2LDRi12: - case ARM::tRestore: + case ARM::tLDRspi: case ARM::VLDRD: case ARM::VLDRS: if (MI->getOperand(1).isFI() && @@ -925,10 +978,8 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, return MI->getOperand(0).getReg(); } break; - case ARM::VLDMQ: + case ARM::VLDMQIA: if (MI->getOperand(1).isFI() && - MI->getOperand(2).isImm() && - MI->getOperand(2).getImm() == ARM_AM::getAM4ModeImm(ARM_AM::ia) && MI->getOperand(0).getSubReg() == 0) { FrameIndex = MI->getOperand(1).getIndex(); return MI->getOperand(0).getReg(); @@ -939,6 +990,78 @@ ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, return 0; } +unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI, + int &FrameIndex) const { + const MachineMemOperand *Dummy; + return MI->getDesc().mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex); +} + +bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{ + // This hook gets to expand COPY instructions before they become + // copyPhysReg() calls. Look for VMOVS instructions that can legally be + // widened to VMOVD. We prefer the VMOVD when possible because it may be + // changed into a VORR that can go down the NEON pipeline. + if (!WidenVMOVS || !MI->isCopy()) + return false; + + // Look for a copy between even S-registers. That is where we keep floats + // when using NEON v2f32 instructions for f32 arithmetic. + unsigned DstRegS = MI->getOperand(0).getReg(); + unsigned SrcRegS = MI->getOperand(1).getReg(); + if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS)) + return false; + + const TargetRegisterInfo *TRI = &getRegisterInfo(); + unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, + &ARM::DPRRegClass); + unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, + &ARM::DPRRegClass); + if (!DstRegD || !SrcRegD) + return false; + + // We want to widen this into a DstRegD = VMOVD SrcRegD copy. 
This is only + // legal if the COPY already defines the full DstRegD, and it isn't a + // sub-register insertion. + if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI)) + return false; + + // A dead copy shouldn't show up here, but reject it just in case. + if (MI->getOperand(0).isDead()) + return false; + + // All clear, widen the COPY. + DEBUG(dbgs() << "widening: " << *MI); + + // Get rid of the old of DstRegD. Leave it if it defines a Q-reg + // or some other super-register. + int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD); + if (ImpDefIdx != -1) + MI->RemoveOperand(ImpDefIdx); + + // Change the opcode and operands. + MI->setDesc(get(ARM::VMOVD)); + MI->getOperand(0).setReg(DstRegD); + MI->getOperand(1).setReg(SrcRegD); + AddDefaultPred(MachineInstrBuilder(MI)); + + // We are now reading SrcRegD instead of SrcRegS. This may upset the + // register scavenger and machine verifier, so we need to indicate that we + // are reading an undefined value from SrcRegD, but a proper value from + // SrcRegS. + MI->getOperand(1).setIsUndef(); + MachineInstrBuilder(MI).addReg(SrcRegS, RegState::Implicit); + + // SrcRegD may actually contain an unrelated value in the ssub_1 + // sub-register. Don't kill it. Only kill the ssub_0 sub-register. + if (MI->getOperand(1).isKill()) { + MI->getOperand(1).setIsKill(false); + MI->addRegisterKilled(SrcRegS, TRI, true); + } + + DEBUG(dbgs() << "replaced by: " << *MI); + return true; +} + MachineInstr* ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx, uint64_t Offset, @@ -961,7 +1084,7 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) { ARMConstantPoolValue *ACPV = static_cast(MCPE.Val.MachineCPVal); - unsigned PCLabelId = AFI->createConstPoolEntryUId(); + unsigned PCLabelId = AFI->createPICLabelUId(); ARMConstantPoolValue *NewCPV = 0; // FIXME: The below assumes PIC relocation model and that the function // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and @@ -969,17 +1092,24 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) { // instructions, so that's probably OK, but is PIC always correct when // we get here? 
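  // The rewrite below replaces direct ARMConstantPoolValue construction with
  // the subclass factory methods, e.g. (for the global-value case):
  //   NewCPV = ARMConstantPoolConstant::Create(GV, PCLabelId, ARMCP::CPValue, 4);
  // with the analogous ARMConstantPoolSymbol / ARMConstantPoolMBB factories
  // used for external symbols and basic-block addresses.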
if (ACPV->isGlobalValue()) - NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId, - ARMCP::CPValue, 4); + NewCPV = ARMConstantPoolConstant:: + Create(cast(ACPV)->getGV(), PCLabelId, + ARMCP::CPValue, 4); else if (ACPV->isExtSymbol()) - NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(), - ACPV->getSymbol(), PCLabelId, 4); + NewCPV = ARMConstantPoolSymbol:: + Create(MF.getFunction()->getContext(), + cast(ACPV)->getSymbol(), PCLabelId, 4); else if (ACPV->isBlockAddress()) - NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId, - ARMCP::CPBlockAddress, 4); + NewCPV = ARMConstantPoolConstant:: + Create(cast(ACPV)->getBlockAddress(), PCLabelId, + ARMCP::CPBlockAddress, 4); else if (ACPV->isLSDA()) - NewCPV = new ARMConstantPoolValue(MF.getFunction(), PCLabelId, - ARMCP::CPLSDA, 4); + NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId, + ARMCP::CPLSDA, 4); + else if (ACPV->isMachineBasicBlock()) + NewCPV = ARMConstantPoolMBB:: + Create(MF.getFunction()->getContext(), + cast(ACPV)->getMBB(), PCLabelId, 4); else llvm_unreachable("Unexpected ARM constantpool value type!!"); CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment()); @@ -1008,7 +1138,7 @@ reMaterialize(MachineBasicBlock &MBB, MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode), DestReg) .addConstantPoolIndex(CPI).addImm(PCLabelId); - (*MIB).setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end()); + MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end()); break; } } @@ -1031,12 +1161,18 @@ ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const { } bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0, - const MachineInstr *MI1) const { + const MachineInstr *MI1, + const MachineRegisterInfo *MRI) const { int Opcode = MI0->getOpcode(); if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic || Opcode == ARM::tLDRpci || - Opcode == ARM::tLDRpci_pic) { + Opcode == ARM::tLDRpci_pic || + Opcode == ARM::MOV_ga_dyn || + Opcode == ARM::MOV_ga_pcrel || + Opcode == ARM::MOV_ga_pcrel_ldr || + Opcode == ARM::t2MOV_ga_dyn || + Opcode == ARM::t2MOV_ga_pcrel) { if (MI1->getOpcode() != Opcode) return false; if (MI0->getNumOperands() != MI1->getNumOperands()) @@ -1047,17 +1183,63 @@ bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0, if (MO0.getOffset() != MO1.getOffset()) return false; + if (Opcode == ARM::MOV_ga_dyn || + Opcode == ARM::MOV_ga_pcrel || + Opcode == ARM::MOV_ga_pcrel_ldr || + Opcode == ARM::t2MOV_ga_dyn || + Opcode == ARM::t2MOV_ga_pcrel) + // Ignore the PC labels. 
+ return MO0.getGlobal() == MO1.getGlobal(); + const MachineFunction *MF = MI0->getParent()->getParent(); const MachineConstantPool *MCP = MF->getConstantPool(); int CPI0 = MO0.getIndex(); int CPI1 = MO1.getIndex(); const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0]; const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1]; - ARMConstantPoolValue *ACPV0 = - static_cast(MCPE0.Val.MachineCPVal); - ARMConstantPoolValue *ACPV1 = - static_cast(MCPE1.Val.MachineCPVal); - return ACPV0->hasSameValue(ACPV1); + bool isARMCP0 = MCPE0.isMachineConstantPoolEntry(); + bool isARMCP1 = MCPE1.isMachineConstantPoolEntry(); + if (isARMCP0 && isARMCP1) { + ARMConstantPoolValue *ACPV0 = + static_cast(MCPE0.Val.MachineCPVal); + ARMConstantPoolValue *ACPV1 = + static_cast(MCPE1.Val.MachineCPVal); + return ACPV0->hasSameValue(ACPV1); + } else if (!isARMCP0 && !isARMCP1) { + return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal; + } + return false; + } else if (Opcode == ARM::PICLDR) { + if (MI1->getOpcode() != Opcode) + return false; + if (MI0->getNumOperands() != MI1->getNumOperands()) + return false; + + unsigned Addr0 = MI0->getOperand(1).getReg(); + unsigned Addr1 = MI1->getOperand(1).getReg(); + if (Addr0 != Addr1) { + if (!MRI || + !TargetRegisterInfo::isVirtualRegister(Addr0) || + !TargetRegisterInfo::isVirtualRegister(Addr1)) + return false; + + // This assumes SSA form. + MachineInstr *Def0 = MRI->getVRegDef(Addr0); + MachineInstr *Def1 = MRI->getVRegDef(Addr1); + // Check if the loaded value, e.g. a constantpool of a global address, are + // the same. + if (!produceSameValue(Def0, Def1, MRI)) + return false; + } + + for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) { + // %vreg12 = PICLDR %vreg11, 0, pred:14, pred:%noreg + const MachineOperand &MO0 = MI0->getOperand(i); + const MachineOperand &MO1 = MI1->getOperand(i); + if (!MO0.isIdenticalTo(MO1)) + return false; + } + return true; } return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs); @@ -1136,7 +1318,7 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, } /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to -/// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should +/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should /// be scheduled togther. On some targets if two loads are loading from /// addresses in the same cache line, it's better if they are scheduled /// together. This function takes two integers that represent the load offsets @@ -1204,54 +1386,44 @@ bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI, return false; } -bool ARMBaseInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB, - unsigned NumInstrs, - float Probability, - float Confidence) const { - if (!NumInstrs) +bool ARMBaseInstrInfo:: +isProfitableToIfCvt(MachineBasicBlock &MBB, + unsigned NumCycles, unsigned ExtraPredCycles, + const BranchProbability &Probability) const { + if (!NumCycles) return false; - // Use old-style heuristics - if (OldARMIfCvt) { - if (Subtarget.getCPUString() == "generic") - // Generic (and overly aggressive) if-conversion limits for testing. - return NumInstrs <= 10; - if (Subtarget.hasV7Ops()) - return NumInstrs <= 3; - return NumInstrs <= 2; - } - // Attempt to estimate the relative costs of predication versus branching. 
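  // A worked example of the integer cost model introduced below, with
  // made-up numbers: for NumCycles = 4, ExtraPredCycles = 1,
  // Probability = 4/5, and a misprediction penalty of 10:
  //   UnpredCost = 4 * 4 / 5           (= 3, integer division)
  //              + 1                   (the branch itself)
  //              + 10 / 10             (= 5 in total)
  // and NumCycles + ExtraPredCycles = 4 + 1 = 5 <= 5, so predication wins.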
- float UnpredCost = Probability * NumInstrs; - UnpredCost += 1.0; // The branch itself - UnpredCost += (1.0 - Confidence) * Subtarget.getMispredictionPenalty(); - - float PredCost = NumInstrs; - - return PredCost < UnpredCost; + unsigned UnpredCost = Probability.getNumerator() * NumCycles; + UnpredCost /= Probability.getDenominator(); + UnpredCost += 1; // The branch itself + UnpredCost += Subtarget.getMispredictionPenalty() / 10; + return (NumCycles + ExtraPredCycles) <= UnpredCost; } bool ARMBaseInstrInfo:: -isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT, - MachineBasicBlock &FMBB, unsigned NumF, - float Probability, float Confidence) const { - // Use old-style if-conversion heuristics - if (OldARMIfCvt) { - return NumT && NumF && NumT <= 2 && NumF <= 2; - } - - if (!NumT || !NumF) +isProfitableToIfCvt(MachineBasicBlock &TMBB, + unsigned TCycles, unsigned TExtra, + MachineBasicBlock &FMBB, + unsigned FCycles, unsigned FExtra, + const BranchProbability &Probability) const { + if (!TCycles || !FCycles) return false; // Attempt to estimate the relative costs of predication versus branching. - float UnpredCost = Probability * NumT + (1.0 - Probability) * NumF; - UnpredCost += 1.0; // The branch itself - UnpredCost += (1.0 - Confidence) * Subtarget.getMispredictionPenalty(); + unsigned TUnpredCost = Probability.getNumerator() * TCycles; + TUnpredCost /= Probability.getDenominator(); + + uint32_t Comp = Probability.getDenominator() - Probability.getNumerator(); + unsigned FUnpredCost = Comp * FCycles; + FUnpredCost /= Probability.getDenominator(); - float PredCost = NumT + NumF; + unsigned UnpredCost = TUnpredCost + FUnpredCost; + UnpredCost += 1; // The branch itself + UnpredCost += Subtarget.getMispredictionPenalty() / 10; - return PredCost < UnpredCost; + return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost; } /// getInstrPredicate - If instruction is predicated, returns its predicate @@ -1283,11 +1455,61 @@ int llvm::getMatchingCondBranchOpcode(int Opc) { } +/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the +/// instruction is encoded with an 'S' bit is determined by the optional CPSR +/// def operand. +/// +/// This will go away once we can teach tblgen how to set the optional CPSR def +/// operand itself. 
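/// For example (a sketch), the pseudo-instruction
///   ADDSri rD, rN, #imm        ; implicitly sets CPSR
/// maps to the real opcode ADDri, with the 'S' behaviour expressed by
/// placing CPSR in the optional cc_out operand rather than %noreg.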
+struct AddSubFlagsOpcodePair { + unsigned PseudoOpc; + unsigned MachineOpc; +}; + +static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = { + {ARM::ADDSri, ARM::ADDri}, + {ARM::ADDSrr, ARM::ADDrr}, + {ARM::ADDSrsi, ARM::ADDrsi}, + {ARM::ADDSrsr, ARM::ADDrsr}, + + {ARM::SUBSri, ARM::SUBri}, + {ARM::SUBSrr, ARM::SUBrr}, + {ARM::SUBSrsi, ARM::SUBrsi}, + {ARM::SUBSrsr, ARM::SUBrsr}, + + {ARM::RSBSri, ARM::RSBri}, + {ARM::RSBSrsi, ARM::RSBrsi}, + {ARM::RSBSrsr, ARM::RSBrsr}, + + {ARM::t2ADDSri, ARM::t2ADDri}, + {ARM::t2ADDSrr, ARM::t2ADDrr}, + {ARM::t2ADDSrs, ARM::t2ADDrs}, + + {ARM::t2SUBSri, ARM::t2SUBri}, + {ARM::t2SUBSrr, ARM::t2SUBrr}, + {ARM::t2SUBSrs, ARM::t2SUBrs}, + + {ARM::t2RSBSri, ARM::t2RSBri}, + {ARM::t2RSBSrs, ARM::t2RSBrs}, +}; + +unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) { + static const int NPairs = + sizeof(AddSubFlagsOpcodeMap) / sizeof(AddSubFlagsOpcodePair); + for (AddSubFlagsOpcodePair *OpcPair = &AddSubFlagsOpcodeMap[0], + *End = &AddSubFlagsOpcodeMap[NPairs]; OpcPair != End; ++OpcPair) { + if (OldOpc == OpcPair->PseudoOpc) { + return OpcPair->MachineOpc; + } + } + return 0; +} + void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, DebugLoc dl, unsigned DestReg, unsigned BaseReg, int NumBytes, ARMCC::CondCodes Pred, unsigned PredReg, - const ARMBaseInstrInfo &TII) { + const ARMBaseInstrInfo &TII, unsigned MIFlags) { bool isSub = NumBytes < 0; if (isSub) NumBytes = -NumBytes; @@ -1305,7 +1527,8 @@ void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB, unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri; BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg) .addReg(BaseReg, RegState::Kill).addImm(ThisVal) - .addImm((unsigned)Pred).addReg(PredReg).addReg(0); + .addImm((unsigned)Pred).addReg(PredReg).addReg(0) + .setMIFlags(MIFlags); BaseReg = DestReg; } } @@ -1314,7 +1537,7 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, int &Offset, const ARMBaseInstrInfo &TII) { unsigned Opcode = MI.getOpcode(); - const TargetInstrDesc &Desc = MI.getDesc(); + const MCInstrDesc &Desc = MI.getDesc(); unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask); bool isSub = false; @@ -1457,9 +1680,7 @@ AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask, switch (MI->getOpcode()) { default: break; case ARM::CMPri: - case ARM::CMPzri: case ARM::t2CMPri: - case ARM::t2CMPzri: SrcReg = MI->getOperand(0).getReg(); CmpMask = ~0; CmpValue = MI->getOperand(1).getImm(); @@ -1505,12 +1726,10 @@ static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg, } /// OptimizeCompareInstr - Convert the instruction supplying the argument to the -/// comparison into one that sets the zero bit in the flags register. Update the -/// iterator *only* if a transformation took place. +/// comparison into one that sets the zero bit in the flags register. bool ARMBaseInstrInfo:: OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask, - int CmpValue, const MachineRegisterInfo *MRI, - MachineBasicBlock::iterator &MII) const { + int CmpValue, const MachineRegisterInfo *MRI) const { if (CmpValue != 0) return false; @@ -1573,30 +1792,192 @@ OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask, // Set the "zero" bit in CPSR. 
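    // For example (a sketch at the MI level):
    //   SUBrr r0, r1, r2, pred, %noreg, %noreg
    //   CMPri r0, 0
    //   Bcc   ..., EQ
    // becomes
    //   SUBrr r0, r1, r2, pred, %noreg, CPSR   ; optional cc_out def enabled
    //   Bcc   ..., EQ
    // provided every later use of CPSR in the block uses a condition code
    // that does not depend on the V bit (checked by the forward scan below).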
switch (MI->getOpcode()) { default: break; + case ARM::RSBrr: + case ARM::RSBri: + case ARM::RSCrr: + case ARM::RSCri: + case ARM::ADDrr: case ARM::ADDri: - case ARM::ANDri: - case ARM::t2ANDri: + case ARM::ADCrr: + case ARM::ADCri: + case ARM::SUBrr: case ARM::SUBri: + case ARM::SBCrr: + case ARM::SBCri: + case ARM::t2RSBri: + case ARM::t2ADDrr: case ARM::t2ADDri: + case ARM::t2ADCrr: + case ARM::t2ADCri: + case ARM::t2SUBrr: case ARM::t2SUBri: - MI->RemoveOperand(5); - MachineInstrBuilder(MI) - .addReg(ARM::CPSR, RegState::Define | RegState::Implicit); - MII = llvm::next(MachineBasicBlock::iterator(CmpInstr)); + case ARM::t2SBCrr: + case ARM::t2SBCri: + case ARM::ANDrr: + case ARM::ANDri: + case ARM::t2ANDrr: + case ARM::t2ANDri: + case ARM::ORRrr: + case ARM::ORRri: + case ARM::t2ORRrr: + case ARM::t2ORRri: + case ARM::EORrr: + case ARM::EORri: + case ARM::t2EORrr: + case ARM::t2EORri: { + // Scan forward for the use of CPSR, if it's a conditional code requires + // checking of V bit, then this is not safe to do. If we can't find the + // CPSR use (i.e. used in another block), then it's not safe to perform + // the optimization. + bool isSafe = false; + I = CmpInstr; + E = MI->getParent()->end(); + while (!isSafe && ++I != E) { + const MachineInstr &Instr = *I; + for (unsigned IO = 0, EO = Instr.getNumOperands(); + !isSafe && IO != EO; ++IO) { + const MachineOperand &MO = Instr.getOperand(IO); + if (!MO.isReg() || MO.getReg() != ARM::CPSR) + continue; + if (MO.isDef()) { + isSafe = true; + break; + } + // Condition code is after the operand before CPSR. + ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm(); + switch (CC) { + default: + isSafe = true; + break; + case ARMCC::VS: + case ARMCC::VC: + case ARMCC::GE: + case ARMCC::LT: + case ARMCC::GT: + case ARMCC::LE: + return false; + } + } + } + + if (!isSafe) + return false; + + // Toggle the optional operand to CPSR. + MI->getOperand(5).setReg(ARM::CPSR); + MI->getOperand(5).setIsDef(true); CmpInstr->eraseFromParent(); return true; } + } return false; } +bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI, + MachineInstr *DefMI, unsigned Reg, + MachineRegisterInfo *MRI) const { + // Fold large immediates into add, sub, or, xor. 
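  // A sketch with made-up virtual registers: if the only use of
  //   vreg1 = MOVi32imm 0x00AA00AA     ; not a valid ARM modified-immediate
  // is
  //   vreg2 = ADDrr vreg0, vreg1
  // the constant can be split into two valid SO immediates
  // (per getSOImmTwoPartFirst/Second), giving
  //   vreg3 = ADDri vreg0, 0x000000AA
  //   vreg2 = ADDri vreg3, 0x00AA0000
  // which lets the multi-instruction immediate materialization be deleted.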
+ unsigned DefOpc = DefMI->getOpcode(); + if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm) + return false; + if (!DefMI->getOperand(1).isImm()) + // Could be t2MOVi32imm + return false; + + if (!MRI->hasOneNonDBGUse(Reg)) + return false; + + unsigned UseOpc = UseMI->getOpcode(); + unsigned NewUseOpc = 0; + uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm(); + uint32_t SOImmValV1 = 0, SOImmValV2 = 0; + bool Commute = false; + switch (UseOpc) { + default: return false; + case ARM::SUBrr: + case ARM::ADDrr: + case ARM::ORRrr: + case ARM::EORrr: + case ARM::t2SUBrr: + case ARM::t2ADDrr: + case ARM::t2ORRrr: + case ARM::t2EORrr: { + Commute = UseMI->getOperand(2).getReg() != Reg; + switch (UseOpc) { + default: break; + case ARM::SUBrr: { + if (Commute) + return false; + ImmVal = -ImmVal; + NewUseOpc = ARM::SUBri; + // Fallthrough + } + case ARM::ADDrr: + case ARM::ORRrr: + case ARM::EORrr: { + if (!ARM_AM::isSOImmTwoPartVal(ImmVal)) + return false; + SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal); + SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal); + switch (UseOpc) { + default: break; + case ARM::ADDrr: NewUseOpc = ARM::ADDri; break; + case ARM::ORRrr: NewUseOpc = ARM::ORRri; break; + case ARM::EORrr: NewUseOpc = ARM::EORri; break; + } + break; + } + case ARM::t2SUBrr: { + if (Commute) + return false; + ImmVal = -ImmVal; + NewUseOpc = ARM::t2SUBri; + // Fallthrough + } + case ARM::t2ADDrr: + case ARM::t2ORRrr: + case ARM::t2EORrr: { + if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal)) + return false; + SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal); + SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal); + switch (UseOpc) { + default: break; + case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break; + case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break; + case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break; + } + break; + } + } + } + } + + unsigned OpIdx = Commute ? 2 : 1; + unsigned Reg1 = UseMI->getOperand(OpIdx).getReg(); + bool isKill = UseMI->getOperand(OpIdx).isKill(); + unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg)); + AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(), + *UseMI, UseMI->getDebugLoc(), + get(NewUseOpc), NewReg) + .addReg(Reg1, getKillRegState(isKill)) + .addImm(SOImmValV1))); + UseMI->setDesc(get(NewUseOpc)); + UseMI->getOperand(1).setReg(NewReg); + UseMI->getOperand(1).setIsKill(); + UseMI->getOperand(2).ChangeToImmediate(SOImmValV2); + DefMI->eraseFromParent(); + return true; +} + unsigned -ARMBaseInstrInfo::getNumMicroOps(const MachineInstr *MI, - const InstrItineraryData *ItinData) const { +ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, + const MachineInstr *MI) const { if (!ItinData || ItinData->isEmpty()) return 1; - const TargetInstrDesc &Desc = MI->getDesc(); + const MCInstrDesc &Desc = MI->getDesc(); unsigned Class = Desc.getSchedClass(); unsigned UOps = ItinData->Itineraries[Class].NumMicroOps; if (UOps) @@ -1607,51 +1988,78 @@ ARMBaseInstrInfo::getNumMicroOps(const MachineInstr *MI, default: llvm_unreachable("Unexpected multi-uops instruction!"); break; - case ARM::VLDMQ: - case ARM::VSTMQ: + case ARM::VLDMQIA: + case ARM::VSTMQIA: return 2; // The number of uOps for load / store multiple are determined by the number // registers. + // // On Cortex-A8, each pair of register loads / stores can be scheduled on the // same cycle. The scheduling for the first load / store must be done // separately by assuming the the address is not 64-bit aligned. 
+ // // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address - // is not 64-bit aligned, then AGU would take an extra cycle. - // For VFP / NEON load / store multiple, the formula is - // (#reg / 2) + (#reg % 2) + 1. - case ARM::VLDMD: - case ARM::VLDMS: - case ARM::VLDMD_UPD: - case ARM::VLDMS_UPD: - case ARM::VSTMD: - case ARM::VSTMS: - case ARM::VSTMD_UPD: - case ARM::VSTMS_UPD: { + // is not 64-bit aligned, then AGU would take an extra cycle. For VFP / NEON + // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1. + case ARM::VLDMDIA: + case ARM::VLDMDIA_UPD: + case ARM::VLDMDDB_UPD: + case ARM::VLDMSIA: + case ARM::VLDMSIA_UPD: + case ARM::VLDMSDB_UPD: + case ARM::VSTMDIA: + case ARM::VSTMDIA_UPD: + case ARM::VSTMDDB_UPD: + case ARM::VSTMSIA: + case ARM::VSTMSIA_UPD: + case ARM::VSTMSDB_UPD: { unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands(); return (NumRegs / 2) + (NumRegs % 2) + 1; } - case ARM::LDM_RET: - case ARM::LDM: - case ARM::LDM_UPD: - case ARM::STM: - case ARM::STM_UPD: - case ARM::tLDM: - case ARM::tLDM_UPD: - case ARM::tSTM_UPD: + + case ARM::LDMIA_RET: + case ARM::LDMIA: + case ARM::LDMDA: + case ARM::LDMDB: + case ARM::LDMIB: + case ARM::LDMIA_UPD: + case ARM::LDMDA_UPD: + case ARM::LDMDB_UPD: + case ARM::LDMIB_UPD: + case ARM::STMIA: + case ARM::STMDA: + case ARM::STMDB: + case ARM::STMIB: + case ARM::STMIA_UPD: + case ARM::STMDA_UPD: + case ARM::STMDB_UPD: + case ARM::STMIB_UPD: + case ARM::tLDMIA: + case ARM::tLDMIA_UPD: + case ARM::tSTMIA_UPD: case ARM::tPOP_RET: case ARM::tPOP: case ARM::tPUSH: - case ARM::t2LDM_RET: - case ARM::t2LDM: - case ARM::t2LDM_UPD: - case ARM::t2STM: - case ARM::t2STM_UPD: { + case ARM::t2LDMIA_RET: + case ARM::t2LDMIA: + case ARM::t2LDMDB: + case ARM::t2LDMIA_UPD: + case ARM::t2LDMDB_UPD: + case ARM::t2STMIA: + case ARM::t2STMDB: + case ARM::t2STMIA_UPD: + case ARM::t2STMDB_UPD: { unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1; if (Subtarget.isCortexA8()) { - // 4 registers would be issued: 1, 2, 1. - // 5 registers would be issued: 1, 2, 2. - return 1 + (NumRegs / 2); + if (NumRegs < 4) + return 2; + // 4 registers would be issued: 2, 2. + // 5 registers would be issued: 2, 2, 1. + UOps = (NumRegs / 2); + if (NumRegs % 2) + ++UOps; + return UOps; } else if (Subtarget.isCortexA9()) { UOps = (NumRegs / 2); // If there are odd number of registers or if it's not 64-bit aligned, @@ -1671,10 +2079,10 @@ ARMBaseInstrInfo::getNumMicroOps(const MachineInstr *MI, int ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefClass, unsigned DefIdx, unsigned DefAlign) const { - int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1; + int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1; if (RegNo <= 0) // Def is the address writeback. return ItinData->getOperandCycle(DefClass, DefIdx); @@ -1688,13 +2096,16 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, } else if (Subtarget.isCortexA9()) { DefCycle = RegNo; bool isSLoad = false; - switch (DefTID.getOpcode()) { + + switch (DefMCID.getOpcode()) { default: break; - case ARM::VLDMS: - case ARM::VLDMS_UPD: + case ARM::VLDMSIA: + case ARM::VLDMSIA_UPD: + case ARM::VLDMSDB_UPD: isSLoad = true; break; } + // If there are odd number of 'S' registers or if it's not 64-bit aligned, // then it takes an extra cycle. 
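    // E.g. the def of the third S register loaded by a VLDMSIA on Cortex-A9
    // (RegNo == 3, odd) becomes available one cycle later than RegNo alone
    // would suggest.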
if ((isSLoad && (RegNo % 2)) || DefAlign < 8) @@ -1709,10 +2120,10 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefClass, unsigned DefIdx, unsigned DefAlign) const { - int RegNo = (int)(DefIdx+1) - DefTID.getNumOperands() + 1; + int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1; if (RegNo <= 0) // Def is the address writeback. return ItinData->getOperandCycle(DefClass, DefIdx); @@ -1744,10 +2155,10 @@ ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseClass, unsigned UseIdx, unsigned UseAlign) const { - int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1; + int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1; if (RegNo <= 0) return ItinData->getOperandCycle(UseClass, UseIdx); @@ -1760,13 +2171,16 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, } else if (Subtarget.isCortexA9()) { UseCycle = RegNo; bool isSStore = false; - switch (UseTID.getOpcode()) { + + switch (UseMCID.getOpcode()) { default: break; - case ARM::VSTMS: - case ARM::VSTMS_UPD: + case ARM::VSTMSIA: + case ARM::VSTMSIA_UPD: + case ARM::VSTMSDB_UPD: isSStore = true; break; } + // If there are odd number of 'S' registers or if it's not 64-bit aligned, // then it takes an extra cycle. if ((isSStore && (RegNo % 2)) || UseAlign < 8) @@ -1781,10 +2195,10 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseClass, unsigned UseIdx, unsigned UseAlign) const { - int RegNo = (int)(UseIdx+1) - UseTID.getNumOperands() + 1; + int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1; if (RegNo <= 0) return ItinData->getOperandCycle(UseClass, UseIdx); @@ -1810,14 +2224,14 @@ ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData, int ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, - const TargetInstrDesc &DefTID, + const MCInstrDesc &DefMCID, unsigned DefIdx, unsigned DefAlign, - const TargetInstrDesc &UseTID, + const MCInstrDesc &UseMCID, unsigned UseIdx, unsigned UseAlign) const { - unsigned DefClass = DefTID.getSchedClass(); - unsigned UseClass = UseTID.getSchedClass(); + unsigned DefClass = DefMCID.getSchedClass(); + unsigned UseClass = UseMCID.getSchedClass(); - if (DefIdx < DefTID.getNumDefs() && UseIdx < UseTID.getNumOperands()) + if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands()) return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx); // This may be a def / use of a variable_ops instruction, the operand @@ -1825,59 +2239,79 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, // figure it out. 
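  // E.g. for an LDMIA that loads r4-r7, the latency of the def of r6 depends
  // on its position in the register list, so it is derived from the operand
  // index by getLDMDefCycle below rather than read directly from the
  // itinerary table.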
int DefCycle = -1; bool LdmBypass = false; - switch (DefTID.getOpcode()) { + switch (DefMCID.getOpcode()) { default: DefCycle = ItinData->getOperandCycle(DefClass, DefIdx); break; - case ARM::VLDMD: - case ARM::VLDMS: - case ARM::VLDMD_UPD: - case ARM::VLDMS_UPD: { - DefCycle = getVLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign); + + case ARM::VLDMDIA: + case ARM::VLDMDIA_UPD: + case ARM::VLDMDDB_UPD: + case ARM::VLDMSIA: + case ARM::VLDMSIA_UPD: + case ARM::VLDMSDB_UPD: + DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); break; - } - case ARM::LDM_RET: - case ARM::LDM: - case ARM::LDM_UPD: - case ARM::tLDM: - case ARM::tLDM_UPD: + + case ARM::LDMIA_RET: + case ARM::LDMIA: + case ARM::LDMDA: + case ARM::LDMDB: + case ARM::LDMIB: + case ARM::LDMIA_UPD: + case ARM::LDMDA_UPD: + case ARM::LDMDB_UPD: + case ARM::LDMIB_UPD: + case ARM::tLDMIA: + case ARM::tLDMIA_UPD: case ARM::tPUSH: - case ARM::t2LDM_RET: - case ARM::t2LDM: - case ARM::t2LDM_UPD: { + case ARM::t2LDMIA_RET: + case ARM::t2LDMIA: + case ARM::t2LDMDB: + case ARM::t2LDMIA_UPD: + case ARM::t2LDMDB_UPD: LdmBypass = 1; - DefCycle = getLDMDefCycle(ItinData, DefTID, DefClass, DefIdx, DefAlign); + DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign); break; } - } if (DefCycle == -1) // We can't seem to determine the result latency of the def, assume it's 2. DefCycle = 2; int UseCycle = -1; - switch (UseTID.getOpcode()) { + switch (UseMCID.getOpcode()) { default: UseCycle = ItinData->getOperandCycle(UseClass, UseIdx); break; - case ARM::VSTMD: - case ARM::VSTMS: - case ARM::VSTMD_UPD: - case ARM::VSTMS_UPD: { - UseCycle = getVSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign); + + case ARM::VSTMDIA: + case ARM::VSTMDIA_UPD: + case ARM::VSTMDDB_UPD: + case ARM::VSTMSIA: + case ARM::VSTMSIA_UPD: + case ARM::VSTMSDB_UPD: + UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); break; - } - case ARM::STM: - case ARM::STM_UPD: - case ARM::tSTM_UPD: + + case ARM::STMIA: + case ARM::STMDA: + case ARM::STMDB: + case ARM::STMIB: + case ARM::STMIA_UPD: + case ARM::STMDA_UPD: + case ARM::STMDB_UPD: + case ARM::STMIB_UPD: + case ARM::tSTMIA_UPD: case ARM::tPOP_RET: case ARM::tPOP: - case ARM::t2STM: - case ARM::t2STM_UPD: { - UseCycle = getSTMUseCycle(ItinData, UseTID, UseClass, UseIdx, UseAlign); + case ARM::t2STMIA: + case ARM::t2STMDB: + case ARM::t2STMIA_UPD: + case ARM::t2STMDB_UPD: + UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign); break; } - } if (UseCycle == -1) // Assume it's read in the first stage. @@ -1888,12 +2322,13 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, if (LdmBypass) { // It's a variable_ops instruction so we can't use DefIdx here. Just use // first def operand. - if (ItinData->hasPipelineForwarding(DefClass, DefTID.getNumOperands()-1, + if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1, UseClass, UseIdx)) --UseCycle; } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx, - UseClass, UseIdx)) + UseClass, UseIdx)) { --UseCycle; + } } return UseCycle; @@ -1907,12 +2342,11 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, DefMI->isRegSequence() || DefMI->isImplicitDef()) return 1; - const TargetInstrDesc &DefTID = DefMI->getDesc(); + const MCInstrDesc &DefMCID = DefMI->getDesc(); if (!ItinData || ItinData->isEmpty()) - return DefTID.mayLoad() ? 3 : 1; + return DefMCID.mayLoad() ? 
-
-  const TargetInstrDesc &UseTID = UseMI->getDesc();
+  const MCInstrDesc &UseMCID = UseMI->getDesc();
   const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
   if (DefMO.getReg() == ARM::CPSR) {
     if (DefMI->getOpcode() == ARM::FMSTAT) {
@@ -1921,7 +2355,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
     }
 
     // CPSR set and branch can be paired in the same cycle.
-    if (UseTID.isBranch())
+    if (UseMCID.isBranch())
       return 0;
   }
 
@@ -1929,14 +2363,14 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                   ? (*DefMI->memoperands_begin())->getAlignment() : 0;
   unsigned UseAlign = UseMI->hasOneMemOperand()
                   ? (*UseMI->memoperands_begin())->getAlignment() : 0;
-  int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
-                                  UseTID, UseIdx, UseAlign);
+  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
+                                  UseMCID, UseIdx, UseAlign);
 
   if (Latency > 1 &&
       (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
     // variants are one cycle cheaper.
-    switch (DefTID.getOpcode()) {
+    switch (DefMCID.getOpcode()) {
     default: break;
     case ARM::LDRrs:
     case ARM::LDRBrs: {
@@ -1960,6 +2394,101 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
     }
   }
 
+  if (DefAlign < 8 && Subtarget.isCortexA9())
+    switch (DefMCID.getOpcode()) {
+    default: break;
+    case ARM::VLD1q8:
+    case ARM::VLD1q16:
+    case ARM::VLD1q32:
+    case ARM::VLD1q64:
+    case ARM::VLD1q8_UPD:
+    case ARM::VLD1q16_UPD:
+    case ARM::VLD1q32_UPD:
+    case ARM::VLD1q64_UPD:
+    case ARM::VLD2d8:
+    case ARM::VLD2d16:
+    case ARM::VLD2d32:
+    case ARM::VLD2q8:
+    case ARM::VLD2q16:
+    case ARM::VLD2q32:
+    case ARM::VLD2d8_UPD:
+    case ARM::VLD2d16_UPD:
+    case ARM::VLD2d32_UPD:
+    case ARM::VLD2q8_UPD:
+    case ARM::VLD2q16_UPD:
+    case ARM::VLD2q32_UPD:
+    case ARM::VLD3d8:
+    case ARM::VLD3d16:
+    case ARM::VLD3d32:
+    case ARM::VLD1d64T:
+    case ARM::VLD3d8_UPD:
+    case ARM::VLD3d16_UPD:
+    case ARM::VLD3d32_UPD:
+    case ARM::VLD1d64T_UPD:
+    case ARM::VLD3q8_UPD:
+    case ARM::VLD3q16_UPD:
+    case ARM::VLD3q32_UPD:
+    case ARM::VLD4d8:
+    case ARM::VLD4d16:
+    case ARM::VLD4d32:
+    case ARM::VLD1d64Q:
+    case ARM::VLD4d8_UPD:
+    case ARM::VLD4d16_UPD:
+    case ARM::VLD4d32_UPD:
+    case ARM::VLD1d64Q_UPD:
+    case ARM::VLD4q8_UPD:
+    case ARM::VLD4q16_UPD:
+    case ARM::VLD4q32_UPD:
+    case ARM::VLD1DUPq8:
+    case ARM::VLD1DUPq16:
+    case ARM::VLD1DUPq32:
+    case ARM::VLD1DUPq8_UPD:
+    case ARM::VLD1DUPq16_UPD:
+    case ARM::VLD1DUPq32_UPD:
+    case ARM::VLD2DUPd8:
+    case ARM::VLD2DUPd16:
+    case ARM::VLD2DUPd32:
+    case ARM::VLD2DUPd8_UPD:
+    case ARM::VLD2DUPd16_UPD:
+    case ARM::VLD2DUPd32_UPD:
+    case ARM::VLD4DUPd8:
+    case ARM::VLD4DUPd16:
+    case ARM::VLD4DUPd32:
+    case ARM::VLD4DUPd8_UPD:
+    case ARM::VLD4DUPd16_UPD:
+    case ARM::VLD4DUPd32_UPD:
+    case ARM::VLD1LNd8:
+    case ARM::VLD1LNd16:
+    case ARM::VLD1LNd32:
+    case ARM::VLD1LNd8_UPD:
+    case ARM::VLD1LNd16_UPD:
+    case ARM::VLD1LNd32_UPD:
+    case ARM::VLD2LNd8:
+    case ARM::VLD2LNd16:
+    case ARM::VLD2LNd32:
+    case ARM::VLD2LNq16:
+    case ARM::VLD2LNq32:
+    case ARM::VLD2LNd8_UPD:
+    case ARM::VLD2LNd16_UPD:
+    case ARM::VLD2LNd32_UPD:
+    case ARM::VLD2LNq16_UPD:
+    case ARM::VLD2LNq32_UPD:
+    case ARM::VLD4LNd8:
+    case ARM::VLD4LNd16:
+    case ARM::VLD4LNd32:
+    case ARM::VLD4LNq16:
+    case ARM::VLD4LNq32:
+    case ARM::VLD4LNd8_UPD:
+    case ARM::VLD4LNd16_UPD:
+    case ARM::VLD4LNd32_UPD:
+    case ARM::VLD4LNq16_UPD:
+    case ARM::VLD4LNq32_UPD:
+      // If the address is not 64-bit aligned, the latencies of these
+      // instructions increase by one.
+      ++Latency;
+      break;
+    }
+
   return Latency;
 }
@@ -1970,33 +2499,37 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
   if (!DefNode->isMachineOpcode())
     return 1;
 
-  const TargetInstrDesc &DefTID = get(DefNode->getMachineOpcode());
+  const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());
+
+  if (isZeroCost(DefMCID.Opcode))
+    return 0;
+
   if (!ItinData || ItinData->isEmpty())
-    return DefTID.mayLoad() ? 3 : 1;
+    return DefMCID.mayLoad() ? 3 : 1;
 
   if (!UseNode->isMachineOpcode()) {
-    int Latency = ItinData->getOperandCycle(DefTID.getSchedClass(), DefIdx);
+    int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
     if (Subtarget.isCortexA9())
       return Latency <= 2 ? 1 : Latency - 1;
     else
       return Latency <= 3 ? 1 : Latency - 2;
   }
 
-  const TargetInstrDesc &UseTID = get(UseNode->getMachineOpcode());
+  const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
   const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
   unsigned DefAlign = !DefMN->memoperands_empty()
                   ? (*DefMN->memoperands_begin())->getAlignment() : 0;
   const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
   unsigned UseAlign = !UseMN->memoperands_empty()
                   ? (*UseMN->memoperands_begin())->getAlignment() : 0;
-  int Latency = getOperandLatency(ItinData, DefTID, DefIdx, DefAlign,
-                                  UseTID, UseIdx, UseAlign);
+  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
+                                  UseMCID, UseIdx, UseAlign);
 
   if (Latency > 1 &&
       (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
     // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
     // variants are one cycle cheaper.
-    switch (DefTID.getOpcode()) {
+    switch (DefMCID.getOpcode()) {
     default: break;
     case ARM::LDRrs:
     case ARM::LDRBrs: {
@@ -2022,9 +2555,156 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
     }
   }
 
+  if (DefAlign < 8 && Subtarget.isCortexA9())
+    switch (DefMCID.getOpcode()) {
+    default: break;
+    case ARM::VLD1q8Pseudo:
+    case ARM::VLD1q16Pseudo:
+    case ARM::VLD1q32Pseudo:
+    case ARM::VLD1q64Pseudo:
+    case ARM::VLD1q8Pseudo_UPD:
+    case ARM::VLD1q16Pseudo_UPD:
+    case ARM::VLD1q32Pseudo_UPD:
+    case ARM::VLD1q64Pseudo_UPD:
+    case ARM::VLD2d8Pseudo:
+    case ARM::VLD2d16Pseudo:
+    case ARM::VLD2d32Pseudo:
+    case ARM::VLD2q8Pseudo:
+    case ARM::VLD2q16Pseudo:
+    case ARM::VLD2q32Pseudo:
+    case ARM::VLD2d8Pseudo_UPD:
+    case ARM::VLD2d16Pseudo_UPD:
+    case ARM::VLD2d32Pseudo_UPD:
+    case ARM::VLD2q8Pseudo_UPD:
+    case ARM::VLD2q16Pseudo_UPD:
+    case ARM::VLD2q32Pseudo_UPD:
+    case ARM::VLD3d8Pseudo:
+    case ARM::VLD3d16Pseudo:
+    case ARM::VLD3d32Pseudo:
+    case ARM::VLD1d64TPseudo:
+    case ARM::VLD3d8Pseudo_UPD:
+    case ARM::VLD3d16Pseudo_UPD:
+    case ARM::VLD3d32Pseudo_UPD:
+    case ARM::VLD1d64TPseudo_UPD:
+    case ARM::VLD3q8Pseudo_UPD:
+    case ARM::VLD3q16Pseudo_UPD:
+    case ARM::VLD3q32Pseudo_UPD:
+    case ARM::VLD3q8oddPseudo:
+    case ARM::VLD3q16oddPseudo:
+    case ARM::VLD3q32oddPseudo:
+    case ARM::VLD3q8oddPseudo_UPD:
+    case ARM::VLD3q16oddPseudo_UPD:
+    case ARM::VLD3q32oddPseudo_UPD:
+    case ARM::VLD4d8Pseudo:
+    case ARM::VLD4d16Pseudo:
+    case ARM::VLD4d32Pseudo:
+    case ARM::VLD1d64QPseudo:
+    case ARM::VLD4d8Pseudo_UPD:
+    case ARM::VLD4d16Pseudo_UPD:
+    case ARM::VLD4d32Pseudo_UPD:
+    case ARM::VLD1d64QPseudo_UPD:
+    case ARM::VLD4q8Pseudo_UPD:
+    case ARM::VLD4q16Pseudo_UPD:
+    case ARM::VLD4q32Pseudo_UPD:
+    case ARM::VLD4q8oddPseudo:
+    case ARM::VLD4q16oddPseudo:
+    case ARM::VLD4q32oddPseudo:
+    case ARM::VLD4q8oddPseudo_UPD:
+    case ARM::VLD4q16oddPseudo_UPD:
+    case ARM::VLD4q32oddPseudo_UPD:
+    case ARM::VLD1DUPq8Pseudo:
+    case ARM::VLD1DUPq16Pseudo:
+    case ARM::VLD1DUPq32Pseudo:
+    case ARM::VLD1DUPq8Pseudo_UPD:
+    case ARM::VLD1DUPq16Pseudo_UPD:
+    case ARM::VLD1DUPq32Pseudo_UPD:
+    case ARM::VLD2DUPd8Pseudo:
+    case ARM::VLD2DUPd16Pseudo:
+    case ARM::VLD2DUPd32Pseudo:
+    case ARM::VLD2DUPd8Pseudo_UPD:
+    case ARM::VLD2DUPd16Pseudo_UPD:
+    case ARM::VLD2DUPd32Pseudo_UPD:
+    case ARM::VLD4DUPd8Pseudo:
+    case ARM::VLD4DUPd16Pseudo:
+    case ARM::VLD4DUPd32Pseudo:
+    case ARM::VLD4DUPd8Pseudo_UPD:
+    case ARM::VLD4DUPd16Pseudo_UPD:
+    case ARM::VLD4DUPd32Pseudo_UPD:
+    case ARM::VLD1LNq8Pseudo:
+    case ARM::VLD1LNq16Pseudo:
+    case ARM::VLD1LNq32Pseudo:
+    case ARM::VLD1LNq8Pseudo_UPD:
+    case ARM::VLD1LNq16Pseudo_UPD:
+    case ARM::VLD1LNq32Pseudo_UPD:
+    case ARM::VLD2LNd8Pseudo:
+    case ARM::VLD2LNd16Pseudo:
+    case ARM::VLD2LNd32Pseudo:
+    case ARM::VLD2LNq16Pseudo:
+    case ARM::VLD2LNq32Pseudo:
+    case ARM::VLD2LNd8Pseudo_UPD:
+    case ARM::VLD2LNd16Pseudo_UPD:
+    case ARM::VLD2LNd32Pseudo_UPD:
+    case ARM::VLD2LNq16Pseudo_UPD:
+    case ARM::VLD2LNq32Pseudo_UPD:
+    case ARM::VLD4LNd8Pseudo:
+    case ARM::VLD4LNd16Pseudo:
+    case ARM::VLD4LNd32Pseudo:
+    case ARM::VLD4LNq16Pseudo:
+    case ARM::VLD4LNq32Pseudo:
+    case ARM::VLD4LNd8Pseudo_UPD:
+    case ARM::VLD4LNd16Pseudo_UPD:
+    case ARM::VLD4LNd32Pseudo_UPD:
+    case ARM::VLD4LNq16Pseudo_UPD:
+    case ARM::VLD4LNq32Pseudo_UPD:
+      // If the address is not 64-bit aligned, the latencies of these
+      // instructions increase by one.
+      ++Latency;
+      break;
+    }
+
   return Latency;
 }
 
+int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+                                      const MachineInstr *MI,
+                                      unsigned *PredCost) const {
+  if (MI->isCopyLike() || MI->isInsertSubreg() ||
+      MI->isRegSequence() || MI->isImplicitDef())
+    return 1;
+
+  if (!ItinData || ItinData->isEmpty())
+    return 1;
+
+  const MCInstrDesc &MCID = MI->getDesc();
+  unsigned Class = MCID.getSchedClass();
+  unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
+  if (PredCost && MCID.hasImplicitDefOfPhysReg(ARM::CPSR))
+    // When predicated, CPSR is an additional source operand for CPSR-updating
+    // instructions; this apparently increases their latencies.
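+    // For example, a flag-setting instruction that is also predicated, such
+    // as a conditional ADDS, reads CPSR for its condition code in addition
+    // to writing it.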
+    *PredCost = 1;
+  if (UOps)
+    return ItinData->getStageLatency(Class);
+  return getNumMicroOps(ItinData, MI);
+}
+
+int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+                                      SDNode *Node) const {
+  if (!Node->isMachineOpcode())
+    return 1;
+
+  if (!ItinData || ItinData->isEmpty())
+    return 1;
+
+  unsigned Opcode = Node->getMachineOpcode();
+  switch (Opcode) {
+  default:
+    return ItinData->getStageLatency(get(Opcode).getSchedClass());
+  case ARM::VLDMQIA:
+  case ARM::VSTMQIA:
+    return 2;
+  }
+}
+
 bool ARMBaseInstrInfo::
 hasHighOperandLatency(const InstrItineraryData *ItinData,
                       const MachineRegisterInfo *MRI,
@@ -2059,3 +2739,91 @@ hasLowDefLatency(const InstrItineraryData *ItinData,
   }
   return false;
 }
+
+bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
+                                         StringRef &ErrInfo) const {
+  if (convertAddSubFlagsOpcode(MI->getOpcode())) {
+    ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
+    return false;
+  }
+  return true;
+}
+
+bool
+ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
+                                     unsigned &AddSubOpc,
+                                     bool &NegAcc, bool &HasLane) const {
+  DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
+  if (I == MLxEntryMap.end())
+    return false;
+
+  const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
+  MulOpc = Entry.MulOpc;
+  AddSubOpc = Entry.AddSubOpc;
+  NegAcc = Entry.NegAcc;
+  HasLane = Entry.HasLane;
+  return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Execution domains.
+//===----------------------------------------------------------------------===//
+//
+// Some instructions go down the NEON pipeline, some go down the VFP pipeline,
+// and some can go down both. The vmov instructions go down the VFP pipeline,
+// but they can be changed to vorr equivalents that are executed by the NEON
+// pipeline.
+//
+// We use the following execution domain numbering:
+//
+enum ARMExeDomain {
+  ExeGeneric = 0,
+  ExeVFP = 1,
+  ExeNEON = 2
+};
+//
+// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
+//
+std::pair<uint16_t, uint16_t>
+ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
+  // VMOVD is a VFP instruction, but can be changed to NEON if it isn't
+  // predicated.
+  if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
+    return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
+
+  unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;
+
+  if (Domain & ARMII::DomainNEON)
+    return std::make_pair(ExeNEON, 0);
+
+  // Certain instructions can go either way on Cortex-A8.
+  // Treat them as NEON instructions.
+  if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
+    return std::make_pair(ExeNEON, 0);
+
+  if (Domain & ARMII::DomainVFP)
+    return std::make_pair(ExeVFP, 0);
+
+  return std::make_pair(ExeGeneric, 0);
+}
+
+void
+ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
+  // We only know how to change VMOVD into VORR.
+  assert(MI->getOpcode() == ARM::VMOVD && "Can only swizzle VMOVD");
+  if (Domain != ExeNEON)
+    return;
+
+  // Zap the predicate operands.
+  assert(!isPredicated(MI) && "Cannot predicate a VORRd");
+  MI->RemoveOperand(3);
+  MI->RemoveOperand(2);
+
+  // Change to a VORRd, which requires two identical use operands.
+  MI->setDesc(get(ARM::VORRd));
+
+  // Add the extra source operand and new predicates.
+  // This will go before any implicit ops.
+  AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1)));
+}
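Note on the execution-domain hooks: this diff only adds the TargetInstrInfo callbacks; the pass that invokes them lives elsewhere. The sketch below is illustrative only — the helper name maybeSwizzleToNEON and its driver logic are invented for exposition — and relies solely on the two hook signatures introduced above. At the assembly level, the one rewrite implemented here turns vmov.f64 d0, d1 into the NEON-executed equivalent vorr d0, d1, d1.

// Illustrative sketch only; maybeSwizzleToNEON is a hypothetical caller.
#include "ARMBaseInstrInfo.h"
using namespace llvm;

static void maybeSwizzleToNEON(const ARMBaseInstrInfo &TII, MachineInstr *MI) {
  // first  = domain the instruction currently executes in,
  // second = bit mask of domains it could legally execute in.
  std::pair<uint16_t, uint16_t> DP = TII.getExecutionDomain(MI);
  // In this change, only an unpredicated VMOVD reports an alternative:
  // (1 << ExeVFP) | (1 << ExeNEON), i.e. 0x6 with the numbering above.
  if (DP.second & (1u << 2 /*ExeNEON*/))
    TII.setExecutionDomain(MI, 2 /*ExeNEON*/); // VMOVD becomes VORRd
}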