diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index ccd1247a388..c17603a7718 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -18,65 +18,229 @@
 #include "PPCInstrBuilder.h"
 #include "PPCMachineFunctionInfo.h"
 #include "PPCTargetMachine.h"
-#include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/StackMaps.h"
 #include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCInst.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/raw_ostream.h"
 
-#define GET_INSTRINFO_CTOR
-#include "PPCGenInstrInfo.inc"
-
 using namespace llvm;
 
+#define DEBUG_TYPE "ppc-instr-info"
+
+#define GET_INSTRMAP_INFO
+#define GET_INSTRINFO_CTOR_DTOR
+#include "PPCGenInstrInfo.inc"
+
 static cl::
 opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
             cl::desc("Disable analysis for CTR loops"));
 
-PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
-  : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
-    TM(tm), RI(*TM.getSubtargetImpl(), *this) {}
+static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt",
+cl::desc("Disable compare instruction optimization"), cl::Hidden);
+
+static cl::opt<bool> VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy",
+cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
+cl::Hidden);
+
+static cl::opt<bool>
+UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden,
+  cl::desc("Use the old (incorrect) instruction latency calculation"));
+
+// Pin the vtable to this file.
+void PPCInstrInfo::anchor() {}
+
+PPCInstrInfo::PPCInstrInfo(PPCSubtarget &STI)
+    : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
+      Subtarget(STI), RI(STI.getTargetMachine()) {}
 
 /// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
 /// this target when scheduling the DAG.
-ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer(
-  const TargetMachine *TM,
-  const ScheduleDAG *DAG) const {
-  unsigned Directive = TM->getSubtarget<PPCSubtarget>().getDarwinDirective();
+ScheduleHazardRecognizer *
+PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+                                           const ScheduleDAG *DAG) const {
+  unsigned Directive =
+      static_cast<const PPCSubtarget *>(STI)->getDarwinDirective();
   if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
       Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
-    const InstrItineraryData *II = TM->getInstrItineraryData();
-    return new PPCScoreboardHazardRecognizer(II, DAG);
+    const InstrItineraryData *II =
+        static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
+    return new ScoreboardHazardRecognizer(II, DAG);
   }
 
-  return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
+  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
 }
 
 /// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
 /// to use for this target when scheduling the DAG.
-ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
-  const InstrItineraryData *II,
-  const ScheduleDAG *DAG) const {
-  unsigned Directive = TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ScheduleHazardRecognizer *
+PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+                                                 const ScheduleDAG *DAG) const {
+  unsigned Directive =
+      DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
+
+  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
+    return new PPCDispatchGroupSBHazardRecognizer(II, DAG);
 
   // Most subtargets use a PPC970 recognizer.
   if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
       Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
-    const TargetInstrInfo *TII = TM.getInstrInfo();
-    assert(TII && "No InstrInfo?");
+    assert(DAG->TII && "No InstrInfo?");
 
-    return new PPCHazardRecognizer970(*TII);
+    return new PPCHazardRecognizer970(*DAG);
+  }
+
+  return new ScoreboardHazardRecognizer(II, DAG);
+}
+
+unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+                                       const MachineInstr *MI,
+                                       unsigned *PredCost) const {
+  if (!ItinData || UseOldLatencyCalc)
+    return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);
+
+  // The default implementation of getInstrLatency calls getStageLatency, but
+  // getStageLatency does not do the right thing for us. While we have
+  // itinerary, most cores are fully pipelined, and so the itineraries only
+  // express the first part of the pipeline, not every stage. Instead, we need
+  // to use the listed output operand cycle number (using operand 0 here, which
+  // is an output).
+
+  unsigned Latency = 1;
+  unsigned DefClass = MI->getDesc().getSchedClass();
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
+      continue;
+
+    int Cycle = ItinData->getOperandCycle(DefClass, i);
+    if (Cycle < 0)
+      continue;
+
+    Latency = std::max(Latency, (unsigned) Cycle);
+  }
+
+  return Latency;
+}
+
+int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+                                    const MachineInstr *DefMI, unsigned DefIdx,
+                                    const MachineInstr *UseMI,
+                                    unsigned UseIdx) const {
+  int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
+                                                   UseMI, UseIdx);
+
+  if (!DefMI->getParent())
+    return Latency;
+
+  const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
+  unsigned Reg = DefMO.getReg();
+
+  bool IsRegCR;
+  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
+    const MachineRegisterInfo *MRI =
+        &DefMI->getParent()->getParent()->getRegInfo();
+    IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
+              MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
+  } else {
+    IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
+              PPC::CRBITRCRegClass.contains(Reg);
+  }
+
+  if (UseMI->isBranch() && IsRegCR) {
+    if (Latency < 0)
+      Latency = getInstrLatency(ItinData, DefMI);
+
+    // On some cores, there is an additional delay between writing to a
+    // condition register, and using it from a branch.
+    unsigned Directive = Subtarget.getDarwinDirective();
+    switch (Directive) {
+    default: break;
+    case PPC::DIR_7400:
+    case PPC::DIR_750:
+    case PPC::DIR_970:
+    case PPC::DIR_E5500:
+    case PPC::DIR_PWR4:
+    case PPC::DIR_PWR5:
+    case PPC::DIR_PWR5X:
+    case PPC::DIR_PWR6:
+    case PPC::DIR_PWR6X:
+    case PPC::DIR_PWR7:
+    case PPC::DIR_PWR8:
+      Latency += 2;
+      break;
+    }
   }
 
-  return new PPCScoreboardHazardRecognizer(II, DAG);
+  return Latency;
 }
 
+// This function does not list all associative and commutative operations, but
+// only those worth feeding through the machine combiner in an attempt to
+// reduce the critical path. Mostly, this means floating-point operations,
+// because they have high latencies (compared to other operations, such as
+// and/or, which are also associative and commutative, but have low latencies).
+bool PPCInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
+  switch (Inst.getOpcode()) {
+  // FP Add:
+  case PPC::FADD:
+  case PPC::FADDS:
+  // FP Multiply:
+  case PPC::FMUL:
+  case PPC::FMULS:
+  // Altivec Add:
+  case PPC::VADDFP:
+  // VSX Add:
+  case PPC::XSADDDP:
+  case PPC::XVADDDP:
+  case PPC::XVADDSP:
+  case PPC::XSADDSP:
+  // VSX Multiply:
+  case PPC::XSMULDP:
+  case PPC::XVMULDP:
+  case PPC::XVMULSP:
+  case PPC::XSMULSP:
+  // QPX Add:
+  case PPC::QVFADD:
+  case PPC::QVFADDS:
+  case PPC::QVFADDSs:
+  // QPX Multiply:
+  case PPC::QVFMUL:
+  case PPC::QVFMULS:
+  case PPC::QVFMULSs:
+    return true;
+  default:
+    return false;
+  }
+}
+
+bool PPCInstrInfo::getMachineCombinerPatterns(
+    MachineInstr &Root,
+    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
+  // Using the machine combiner in this way is potentially expensive, so
+  // restrict to when aggressive optimizations are desired.
+  if (Subtarget.getTargetMachine().getOptLevel() != CodeGenOpt::Aggressive)
+    return false;
+
+  // FP reassociation is only legal when we don't need strict IEEE semantics.
+  if (!Root.getParent()->getParent()->getTarget().Options.UnsafeFPMath)
+    return false;
+
+  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
+}
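// A standalone illustration (not from this patch) of why the UnsafeFPMath
// guard above is required before forming reassociation patterns: IEEE-754
// addition is not associative, so changing the evaluation order can change
// the rounded result. Compiled by itself, this prints "1 vs 0":
//
//   #include <cstdio>
//   int main() {
//     double a = 1e16, b = -1e16, c = 1.0;
//     std::printf("%g vs %g\n", (a + b) + c, a + (b + c));
//     return 0;
//   }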
 
 // Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
@@ -104,7 +268,12 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
   case PPC::LFS:
   case PPC::LFD:
   case PPC::RESTORE_CR:
+  case PPC::RESTORE_CRBIT:
   case PPC::LVX:
+  case PPC::LXVD2X:
+  case PPC::QVLFDX:
+  case PPC::QVLFSXs:
+  case PPC::QVLFDXb:
   case PPC::RESTORE_VRSAVE:
     // Check for the operands added by addFrameReference (the immediate is the
     // offset which defaults to 0).
@@ -128,7 +297,12 @@ unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
   case PPC::STFS:
   case PPC::STFD:
   case PPC::SPILL_CR:
+  case PPC::SPILL_CRBIT:
   case PPC::STVX:
+  case PPC::STXVD2X:
+  case PPC::QVSTFDX:
+  case PPC::QVSTFSXs:
+  case PPC::QVSTFDXb:
   case PPC::SPILL_VRSAVE:
     // Check for the operands added by addFrameReference (the immediate is the
     // offset which defaults to 0).
@@ -142,19 +316,24 @@ unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
   return 0;
 }
 
-// commuteInstruction - We can commute rlwimi instructions, but only if the
-// rotate amt is zero.  We also have to munge the immediates a bit.
-MachineInstr *
-PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
+MachineInstr *PPCInstrInfo::commuteInstructionImpl(MachineInstr *MI,
+                                                   bool NewMI,
+                                                   unsigned OpIdx1,
+                                                   unsigned OpIdx2) const {
   MachineFunction &MF = *MI->getParent()->getParent();
 
   // Normal instructions can be commuted the obvious way.
-  if (MI->getOpcode() != PPC::RLWIMI)
-    return TargetInstrInfo::commuteInstruction(MI, NewMI);
+  if (MI->getOpcode() != PPC::RLWIMI &&
+      MI->getOpcode() != PPC::RLWIMIo)
+    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
+  // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a
+  // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because
+  // changing the relative order of the mask operands might change what happens
+  // to the high-bits of the mask (and, thus, the result).
 
   // Cannot commute if it has a non-zero rotate count.
   if (MI->getOperand(3).getImm() != 0)
-    return 0;
+    return nullptr;
 
   // If we have a zero rotate count, we have:
   //   M = mask(MB,ME)
@@ -164,9 +343,13 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
   //   Op0 = (Op2 & ~M) | (Op1 & M)
   // Swap op1/op2
+  assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
+         "Only the operands 1 and 2 can be swapped in RLWIMI/RLWIMIo.");
   unsigned Reg0 = MI->getOperand(0).getReg();
   unsigned Reg1 = MI->getOperand(1).getReg();
   unsigned Reg2 = MI->getOperand(2).getReg();
+  unsigned SubReg1 = MI->getOperand(1).getSubReg();
+  unsigned SubReg2 = MI->getOperand(2).getSubReg();
   bool Reg1IsKill = MI->getOperand(1).isKill();
   bool Reg2IsKill = MI->getOperand(2).isKill();
   bool ChangeReg0 = false;
@@ -176,6 +359,7 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
     // Must be two address instruction!
     assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
            "Expecting a two-address instruction!");
+    assert(MI->getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
     Reg2IsKill = false;
     ChangeReg0 = true;
   }
@@ -184,6 +368,11 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
   unsigned MB = MI->getOperand(4).getImm();
   unsigned ME = MI->getOperand(5).getImm();
 
+  // We can't commute a trivial mask (there is no way to represent an all-zero
+  // mask).
+  if (MB == 0 && ME == 31)
+    return nullptr;
+
   if (NewMI) {
     // Create a new instruction.
     unsigned Reg0 = ChangeReg0 ? Reg2 : MI->getOperand(0).getReg();
@@ -196,10 +385,14 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
                 .addImm((MB-1) & 31);
   }
 
-  if (ChangeReg0)
+  if (ChangeReg0) {
     MI->getOperand(0).setReg(Reg2);
+    MI->getOperand(0).setSubReg(SubReg2);
+  }
   MI->getOperand(2).setReg(Reg1);
   MI->getOperand(1).setReg(Reg2);
+  MI->getOperand(2).setSubReg(SubReg1);
+  MI->getOperand(1).setSubReg(SubReg2);
   MI->getOperand(2).setIsKill(Reg1IsKill);
   MI->getOperand(1).setIsKill(Reg2IsKill);
 
@@ -209,12 +402,42 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
   return MI;
 }
 
+bool PPCInstrInfo::findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+                                         unsigned &SrcOpIdx2) const {
+  // For VSX A-Type FMA instructions, it is the first two operands that can be
+  // commuted, however, because the non-encoded tied input operand is listed
+  // first, the operands to swap are actually the second and third.
+
+  int AltOpc = PPC::getAltVSXFMAOpcode(MI->getOpcode());
+  if (AltOpc == -1)
+    return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
+
+  // The commutable operand indices are 2 and 3. Return them in SrcOpIdx1
+  // and SrcOpIdx2.
+  return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
+}
+
 void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI) const {
+  // This function is used for scheduling, and the nop wanted here is the type
+  // that terminates dispatch groups on the POWER cores.
+  unsigned Directive = Subtarget.getDarwinDirective();
+  unsigned Opcode;
+  switch (Directive) {
+  default:            Opcode = PPC::NOP; break;
+  case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break;
+  case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break;
+  case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break; /* FIXME: Update when P8 InstrScheduling model is ready */
+  }
+
   DebugLoc DL;
-  BuildMI(MBB, MI, DL, get(PPC::NOP));
+  BuildMI(MBB, MI, DL, get(Opcode));
 }
 
+/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
+void PPCInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
+  NopInst.setOpcode(PPC::NOP);
+}
 
 // Branch analysis.
 // Note: If the condition register is set to CTR or CTR8 then this is a
@@ -223,18 +446,13 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
                                  bool AllowModify) const {
-  bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+  bool isPPC64 = Subtarget.isPPC64();
 
   // If the block has no terminators, it just falls into the block after it.
-  MachineBasicBlock::iterator I = MBB.end();
-  if (I == MBB.begin())
+  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+  if (I == MBB.end())
     return false;
-  --I;
-  while (I->isDebugValue()) {
-    if (I == MBB.begin())
-      return false;
-    --I;
-  }
+
   if (!isUnpredicatedTerminator(I))
     return false;
 
@@ -256,6 +474,22 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
       Cond.push_back(LastInst->getOperand(0));
       Cond.push_back(LastInst->getOperand(1));
       return false;
+    } else if (LastInst->getOpcode() == PPC::BC) {
+      if (!LastInst->getOperand(1).isMBB())
+        return true;
+      // Block ends with fall-through condbranch.
+      TBB = LastInst->getOperand(1).getMBB();
+      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
+      Cond.push_back(LastInst->getOperand(0));
+      return false;
+    } else if (LastInst->getOpcode() == PPC::BCn) {
+      if (!LastInst->getOperand(1).isMBB())
+        return true;
+      // Block ends with fall-through condbranch.
+      TBB = LastInst->getOperand(1).getMBB();
+      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
+      Cond.push_back(LastInst->getOperand(0));
+      return false;
     } else if (LastInst->getOpcode() == PPC::BDNZ8 ||
                LastInst->getOpcode() == PPC::BDNZ) {
       if (!LastInst->getOperand(0).isMBB())
@@ -303,6 +537,26 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
       Cond.push_back(SecondLastInst->getOperand(1));
       FBB = LastInst->getOperand(0).getMBB();
       return false;
+    } else if (SecondLastInst->getOpcode() == PPC::BC &&
+               LastInst->getOpcode() == PPC::B) {
+      if (!SecondLastInst->getOperand(1).isMBB() ||
+          !LastInst->getOperand(0).isMBB())
+        return true;
+      TBB = SecondLastInst->getOperand(1).getMBB();
+      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
+      Cond.push_back(SecondLastInst->getOperand(0));
+      FBB = LastInst->getOperand(0).getMBB();
+      return false;
+    } else if (SecondLastInst->getOpcode() == PPC::BCn &&
+               LastInst->getOpcode() == PPC::B) {
+      if (!SecondLastInst->getOperand(1).isMBB() ||
+          !LastInst->getOperand(0).isMBB())
+        return true;
+      TBB = SecondLastInst->getOperand(1).getMBB();
+      Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_UNSET));
+      Cond.push_back(SecondLastInst->getOperand(0));
+      FBB = LastInst->getOperand(0).getMBB();
+      return false;
     } else if ((SecondLastInst->getOpcode() == PPC::BDNZ8 ||
                 SecondLastInst->getOpcode() == PPC::BDNZ) &&
                LastInst->getOpcode() == PPC::B) {
@@ -351,15 +605,12 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
 }
 
 unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
-  MachineBasicBlock::iterator I = MBB.end();
-  if (I == MBB.begin()) return 0;
-  --I;
-  while (I->isDebugValue()) {
-    if (I == MBB.begin())
-      return 0;
-    --I;
-  }
+  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+  if (I == MBB.end())
+    return 0;
+
   if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
+      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
       I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
       I->getOpcode() != PPC::BDZ8  && I->getOpcode() != PPC::BDZ)
     return 0;
@@ -372,6 +623,7 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
   if (I == MBB.begin()) return 1;
   --I;
   if (I->getOpcode() != PPC::BCC &&
+      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
       I->getOpcode() != PPC::BDZ8  && I->getOpcode() != PPC::BDZ)
     return 1;
@@ -384,26 +636,30 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
 unsigned
 PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
-                           const SmallVectorImpl<MachineOperand> &Cond,
+                           ArrayRef<MachineOperand> Cond,
                            DebugLoc DL) const {
   // Shouldn't be a fall through.
   assert(TBB && "InsertBranch must not be told to insert a fallthrough");
   assert((Cond.size() == 2 || Cond.size() == 0) &&
          "PPC branch conditions have two components!");
 
-  bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+  bool isPPC64 = Subtarget.isPPC64();
 
   // One-way branch.
-  if (FBB == 0) {
+  if (!FBB) {
    if (Cond.empty())   // Unconditional branch
       BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
     else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
       BuildMI(&MBB, DL, get(Cond[0].getImm() ?
                               (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                               (isPPC64 ? PPC::BDZ8  : PPC::BDZ))).addMBB(TBB);
+    else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
+      BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
+    else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
+      BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
     else                // Conditional branch
       BuildMI(&MBB, DL, get(PPC::BCC))
-        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
+        .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
     return 1;
   }
 
@@ -412,19 +668,23 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
     BuildMI(&MBB, DL, get(Cond[0].getImm() ?
                             (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                             (isPPC64 ? PPC::BDZ8  : PPC::BDZ))).addMBB(TBB);
+  else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
+    BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
+  else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
+    BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
   else
     BuildMI(&MBB, DL, get(PPC::BCC))
-      .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
+      .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
   BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
   return 2;
 }
 
 // Select analysis.
 bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
-                const SmallVectorImpl<MachineOperand> &Cond,
+                ArrayRef<MachineOperand> Cond,
                 unsigned TrueReg, unsigned FalseReg,
                 int &CondCycles, int &TrueCycles, int &FalseCycles) const {
-  if (!TM.getSubtargetImpl()->hasISEL())
+  if (!Subtarget.hasISEL())
     return false;
 
   if (Cond.size() != 2)
@@ -444,7 +704,9 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
 
   // isel is for regular integer GPRs only.
   if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
-      !PPC::G8RCRegClass.hasSubClassEq(RC))
+      !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
+      !PPC::G8RCRegClass.hasSubClassEq(RC) &&
+      !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
     return false;
 
   // FIXME: These numbers are for the A2, how well they work for other cores is
@@ -460,13 +722,12 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
 
 void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI, DebugLoc dl,
-                                unsigned DestReg,
-                                const SmallVectorImpl<MachineOperand> &Cond,
+                                unsigned DestReg, ArrayRef<MachineOperand> Cond,
                                 unsigned TrueReg, unsigned FalseReg) const {
   assert(Cond.size() == 2 &&
          "PPC branch conditions have two components!");
 
-  assert(TM.getSubtargetImpl()->hasISEL() &&
+  assert(Subtarget.hasISEL() &&
          "Cannot insert select on target without ISEL support");
 
   // Get the register classes.
@@ -474,12 +735,15 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
   const TargetRegisterClass *RC =
     RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
   assert(RC && "TrueReg and FalseReg must have overlapping register classes");
-  assert((PPC::GPRCRegClass.hasSubClassEq(RC) ||
-          PPC::G8RCRegClass.hasSubClassEq(RC)) &&
+
+  bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
+                 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
+  assert((Is64Bit ||
+          PPC::GPRCRegClass.hasSubClassEq(RC) ||
+          PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
          "isel is for regular integer GPRs only");
 
-  unsigned OpCode =
-    PPC::GPRCRegClass.hasSubClassEq(RC) ? PPC::ISEL : PPC::ISEL8;
+  unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
   unsigned SelectPred = Cond[0].getImm();
 
   unsigned SubIdx;
@@ -494,6 +758,8 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
   case PPC::PRED_LE: SubIdx = PPC::sub_gt; SwapOps = true;  break;
   case PPC::PRED_UN: SubIdx = PPC::sub_un; SwapOps = false; break;
   case PPC::PRED_NU: SubIdx = PPC::sub_un; SwapOps = true;  break;
+  case PPC::PRED_BIT_SET:   SubIdx = 0; SwapOps = false; break;
+  case PPC::PRED_BIT_UNSET: SubIdx = 0; SwapOps = true;  break;
   }
 
   unsigned FirstReg =  SwapOps ? FalseReg : TrueReg,
@@ -518,10 +784,104 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
             .addReg(Cond[1].getReg(), 0, SubIdx);
 }
 
+static unsigned getCRBitValue(unsigned CRBit) {
+  unsigned Ret = 4;
+  if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
+      CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
+      CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
+      CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
+    Ret = 3;
+  if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
+      CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
+      CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
+      CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
+    Ret = 2;
+  if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
+      CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
+      CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
+      CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
+    Ret = 1;
+  if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
+      CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
+      CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
+      CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
+    Ret = 0;
+
+  assert(Ret != 4 && "Invalid CR bit register");
+  return Ret;
+}
+
 void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator I, DebugLoc DL,
                                unsigned DestReg, unsigned SrcReg,
                                bool KillSrc) const {
+  // We can end up with self copies and similar things as a result of VSX copy
+  // legalization. Promote them here.
+  const TargetRegisterInfo *TRI = &getRegisterInfo();
+  if (PPC::F8RCRegClass.contains(DestReg) &&
+      PPC::VSRCRegClass.contains(SrcReg)) {
+    unsigned SuperReg =
+        TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
+
+    if (VSXSelfCopyCrash && SrcReg == SuperReg)
+      llvm_unreachable("nop VSX copy");
+
+    DestReg = SuperReg;
+  } else if (PPC::VRRCRegClass.contains(DestReg) &&
+             PPC::VSRCRegClass.contains(SrcReg)) {
+    unsigned SuperReg =
+        TRI->getMatchingSuperReg(DestReg, PPC::sub_128, &PPC::VSRCRegClass);
+
+    if (VSXSelfCopyCrash && SrcReg == SuperReg)
+      llvm_unreachable("nop VSX copy");
+
+    DestReg = SuperReg;
+  } else if (PPC::F8RCRegClass.contains(SrcReg) &&
+             PPC::VSRCRegClass.contains(DestReg)) {
+    unsigned SuperReg =
+        TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
+
+    if (VSXSelfCopyCrash && DestReg == SuperReg)
+      llvm_unreachable("nop VSX copy");
+
+    SrcReg = SuperReg;
+  } else if (PPC::VRRCRegClass.contains(SrcReg) &&
+             PPC::VSRCRegClass.contains(DestReg)) {
+    unsigned SuperReg =
+        TRI->getMatchingSuperReg(SrcReg, PPC::sub_128, &PPC::VSRCRegClass);
+
+    if (VSXSelfCopyCrash && DestReg == SuperReg)
+      llvm_unreachable("nop VSX copy");
+
+    SrcReg = SuperReg;
+  }
+
+  // Different class register copy
+  if (PPC::CRBITRCRegClass.contains(SrcReg) &&
+      PPC::GPRCRegClass.contains(DestReg)) {
+    unsigned CRReg = getCRFromCRBit(SrcReg);
+    BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg)
+        .addReg(CRReg, getKillRegState(KillSrc));
+    // Rotate the CR bit in the CR fields to be the least significant bit and
+    // then mask with 0x1 (MB = ME = 31).
+    BuildMI(MBB, I, DL, get(PPC::RLWINM), DestReg)
+        .addReg(DestReg, RegState::Kill)
+        .addImm(TRI->getEncodingValue(CRReg) * 4 + (4 - getCRBitValue(SrcReg)))
+        .addImm(31)
+        .addImm(31);
+    return;
+  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
+             PPC::G8RCRegClass.contains(DestReg)) {
+    BuildMI(MBB, I, DL, get(PPC::MFOCRF8), DestReg)
+        .addReg(SrcReg, getKillRegState(KillSrc));
+    return;
+  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
+             PPC::GPRCRegClass.contains(DestReg)) {
+    BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg)
+        .addReg(SrcReg, getKillRegState(KillSrc));
+    return;
+  }
+
   unsigned Opc;
   if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
     Opc = PPC::OR;
@@ -533,6 +893,25 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     Opc = PPC::MCRF;
   else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
     Opc = PPC::VOR;
+  else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
+    // There are two different ways this can be done:
+    //   1. xxlor : This has lower latency (on the P7), 2 cycles, but can only
+    //      issue in VSU pipeline 0.
+    //   2. xmovdp/xmovsp: This has higher latency (on the P7), 6 cycles, but
+    //      can go to either pipeline.
+    // We'll always use xxlor here, because in practically all cases where
+    // copies are generated, they are close enough to some use that the
+    // lower-latency form is preferable.
+    Opc = PPC::XXLOR;
+  else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) ||
+           PPC::VSSRCRegClass.contains(DestReg, SrcReg))
+    Opc = PPC::XXLORf;
+  else if (PPC::QFRCRegClass.contains(DestReg, SrcReg))
+    Opc = PPC::QVFMR;
+  else if (PPC::QSRCRegClass.contains(DestReg, SrcReg))
+    Opc = PPC::QVFMRs;
+  else if (PPC::QBRCRegClass.contains(DestReg, SrcReg))
+    Opc = PPC::QVFMRb;
   else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::CROR;
   else
@@ -558,12 +937,14 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
   // update isStoreToStackSlot.
   DebugLoc DL;
-  if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
+  if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
+      PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
     NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
                                        .addReg(SrcReg,
                                                getKillRegState(isKill)),
                                        FrameIdx));
-  } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) {
+  } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
+             PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
     NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD))
                                        .addReg(SrcReg,
                                                getKillRegState(isKill)),
@@ -585,53 +966,61 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
                                        FrameIdx));
     return true;
   } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
-    // FIXME: We use CRi here because there is no mtcrf on a bit. Since the
-    // backend currently only uses CR1EQ as an individual bit, this should
-    // not cause any bug. If we need other uses of CR bits, the following
-    // code may be invalid.
-    unsigned Reg = 0;
-    if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR0GT ||
-        SrcReg == PPC::CR0EQ || SrcReg == PPC::CR0UN)
-      Reg = PPC::CR0;
-    else if (SrcReg == PPC::CR1LT || SrcReg == PPC::CR1GT ||
-             SrcReg == PPC::CR1EQ || SrcReg == PPC::CR1UN)
-      Reg = PPC::CR1;
-    else if (SrcReg == PPC::CR2LT || SrcReg == PPC::CR2GT ||
-             SrcReg == PPC::CR2EQ || SrcReg == PPC::CR2UN)
-      Reg = PPC::CR2;
-    else if (SrcReg == PPC::CR3LT || SrcReg == PPC::CR3GT ||
-             SrcReg == PPC::CR3EQ || SrcReg == PPC::CR3UN)
-      Reg = PPC::CR3;
-    else if (SrcReg == PPC::CR4LT || SrcReg == PPC::CR4GT ||
-             SrcReg == PPC::CR4EQ || SrcReg == PPC::CR4UN)
-      Reg = PPC::CR4;
-    else if (SrcReg == PPC::CR5LT || SrcReg == PPC::CR5GT ||
-             SrcReg == PPC::CR5EQ || SrcReg == PPC::CR5UN)
-      Reg = PPC::CR5;
-    else if (SrcReg == PPC::CR6LT || SrcReg == PPC::CR6GT ||
-             SrcReg == PPC::CR6EQ || SrcReg == PPC::CR6UN)
-      Reg = PPC::CR6;
-    else if (SrcReg == PPC::CR7LT || SrcReg == PPC::CR7GT ||
-             SrcReg == PPC::CR7EQ || SrcReg == PPC::CR7UN)
-      Reg = PPC::CR7;
-
-    return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
-                               &PPC::CRRCRegClass, NewMIs, NonRI, SpillsVRS);
-
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_CRBIT))
+                                       .addReg(SrcReg,
+                                               getKillRegState(isKill)),
+                                       FrameIdx));
+    return true;
   } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
     NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STVX))
                                        .addReg(SrcReg,
                                                getKillRegState(isKill)),
                                        FrameIdx));
     NonRI = true;
+  } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXVD2X))
+                                       .addReg(SrcReg,
+                                               getKillRegState(isKill)),
+                                       FrameIdx));
+    NonRI = true;
+  } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXSDX))
+                                       .addReg(SrcReg,
+                                               getKillRegState(isKill)),
+                                       FrameIdx));
+    NonRI = true;
+  } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXSSPX))
+                                       .addReg(SrcReg,
+                                               getKillRegState(isKill)),
+                                       FrameIdx));
+    NonRI = true;
   } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
-    assert(TM.getSubtargetImpl()->isDarwin() &&
+    assert(Subtarget.isDarwin() &&
           "VRSAVE only needs spill/restore on Darwin");
     NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
                                                get(PPC::SPILL_VRSAVE))
                                        .addReg(SrcReg,
                                                getKillRegState(isKill)),
                                        FrameIdx));
     SpillsVRS = true;
+  } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFDX))
+                                       .addReg(SrcReg,
+                                               getKillRegState(isKill)),
+                                       FrameIdx));
+    NonRI = true;
+  } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFSXs))
+                                       .addReg(SrcReg,
+                                               getKillRegState(isKill)),
+                                       FrameIdx));
+    NonRI = true;
+  } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFDXb))
+                                       .addReg(SrcReg,
+                                               getKillRegState(isKill)),
+                                       FrameIdx));
+    NonRI = true;
   } else {
     llvm_unreachable("Unknown regclass!");
   }
@@ -666,11 +1055,10 @@ PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
     MBB.insert(MI, NewMIs[i]);
 
   const MachineFrameInfo &MFI = *MF.getFrameInfo();
-  MachineMemOperand *MMO =
-    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
-                            MachineMemOperand::MOStore,
-                            MFI.getObjectSize(FrameIdx),
-                            MFI.getObjectAlignment(FrameIdx));
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo::getFixedStack(MF, FrameIdx),
+      MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
+      MFI.getObjectAlignment(FrameIdx));
   NewMIs.back()->addMemOperand(MF, MMO);
 }
 
@@ -683,10 +1071,12 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
   // Note: If additional load instructions are added here,
   // update isLoadFromStackSlot.
 
-  if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
+  if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
+      PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
     NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
                                                DestReg), FrameIdx));
-  } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) {
+  } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
+             PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
     NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), DestReg),
                                        FrameIdx));
   } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
@@ -701,48 +1091,46 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
                                        FrameIdx));
     return true;
   } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
-
-    unsigned Reg = 0;
-    if (DestReg == PPC::CR0LT || DestReg == PPC::CR0GT ||
-        DestReg == PPC::CR0EQ || DestReg == PPC::CR0UN)
-      Reg = PPC::CR0;
-    else if (DestReg == PPC::CR1LT || DestReg == PPC::CR1GT ||
-             DestReg == PPC::CR1EQ || DestReg == PPC::CR1UN)
-      Reg = PPC::CR1;
-    else if (DestReg == PPC::CR2LT || DestReg == PPC::CR2GT ||
-             DestReg == PPC::CR2EQ || DestReg == PPC::CR2UN)
-      Reg = PPC::CR2;
-    else if (DestReg == PPC::CR3LT || DestReg == PPC::CR3GT ||
-             DestReg == PPC::CR3EQ || DestReg == PPC::CR3UN)
-      Reg = PPC::CR3;
-    else if (DestReg == PPC::CR4LT || DestReg == PPC::CR4GT ||
-             DestReg == PPC::CR4EQ || DestReg == PPC::CR4UN)
-      Reg = PPC::CR4;
-    else if (DestReg == PPC::CR5LT || DestReg == PPC::CR5GT ||
-             DestReg == PPC::CR5EQ || DestReg == PPC::CR5UN)
-      Reg = PPC::CR5;
-    else if (DestReg == PPC::CR6LT || DestReg == PPC::CR6GT ||
-             DestReg == PPC::CR6EQ || DestReg == PPC::CR6UN)
-      Reg = PPC::CR6;
-    else if (DestReg == PPC::CR7LT || DestReg == PPC::CR7GT ||
-             DestReg == PPC::CR7EQ || DestReg == PPC::CR7UN)
-      Reg = PPC::CR7;
-
-    return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
-                                &PPC::CRRCRegClass, NewMIs, NonRI, SpillsVRS);
-
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
+                                               get(PPC::RESTORE_CRBIT), DestReg),
+                                       FrameIdx));
+    return true;
   } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
     NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LVX), DestReg),
                                        FrameIdx));
     NonRI = true;
+  } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXVD2X), DestReg),
+                                       FrameIdx));
+    NonRI = true;
+  } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXSDX), DestReg),
+                                       FrameIdx));
+    NonRI = true;
+  } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXSSPX), DestReg),
+                                       FrameIdx));
+    NonRI = true;
   } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
-    assert(TM.getSubtargetImpl()->isDarwin() &&
+    assert(Subtarget.isDarwin() &&
           "VRSAVE only needs spill/restore on Darwin");
     NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
                                                get(PPC::RESTORE_VRSAVE),
                                                DestReg),
                                        FrameIdx));
     SpillsVRS = true;
+  } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDX), DestReg),
+                                       FrameIdx));
+    NonRI = true;
+  } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFSXs), DestReg),
+                                       FrameIdx));
+    NonRI = true;
+  } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) {
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDXb), DestReg),
+                                       FrameIdx));
+    NonRI = true;
   } else {
     llvm_unreachable("Unknown regclass!");
   }
@@ -779,24 +1167,13 @@ PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
     MBB.insert(MI, NewMIs[i]);
 
   const MachineFrameInfo &MFI = *MF.getFrameInfo();
-  MachineMemOperand *MMO =
-    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
-                            MachineMemOperand::MOLoad,
-                            MFI.getObjectSize(FrameIdx),
-                            MFI.getObjectAlignment(FrameIdx));
+  MachineMemOperand *MMO = MF.getMachineMemOperand(
+      MachinePointerInfo::getFixedStack(MF, FrameIdx),
+      MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
+      MFI.getObjectAlignment(FrameIdx));
   NewMIs.back()->addMemOperand(MF, MMO);
 }
 
-MachineInstr*
-PPCInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
-                                       int FrameIx, uint64_t Offset,
-                                       const MDNode *MDPtr,
-                                       DebugLoc DL) const {
-  MachineInstrBuilder MIB = BuildMI(MF, DL, get(PPC::DBG_VALUE));
-  addFrameReference(MIB, FrameIx, 0, false).addImm(Offset).addMetadata(MDPtr);
-  return &*MIB;
-}
-
 bool PPCInstrInfo::
 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
   assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
@@ -860,7 +1237,7 @@ bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
 
   unsigned ZeroReg;
   if (UseInfo->isLookupPtrRegClass()) {
-    bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+    bool isPPC64 = Subtarget.isPPC64();
     ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
   } else {
     ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
@@ -894,7 +1271,7 @@ bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                     unsigned NumT, unsigned ExtraT,
                     MachineBasicBlock &FMBB,
                     unsigned NumF, unsigned ExtraF,
-                    const BranchProbability &Probability) const {
+                    BranchProbability Probability) const {
   return !(MBBDefinesCTR(TMBB) && MBBDefinesCTR(FMBB));
 }
 
@@ -921,18 +1298,25 @@ bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
   return !isPredicated(MI);
 }
 
-bool PPCInstrInfo::PredicateInstruction(
-                     MachineInstr *MI,
-                     const SmallVectorImpl<MachineOperand> &Pred) const {
+bool PPCInstrInfo::PredicateInstruction(MachineInstr *MI,
+                                        ArrayRef<MachineOperand> Pred) const {
   unsigned OpC = MI->getOpcode();
-  if (OpC == PPC::BLR) {
+  if (OpC == PPC::BLR || OpC == PPC::BLR8) {
     if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
-      bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+      bool isPPC64 = Subtarget.isPPC64();
       MI->setDesc(get(Pred[0].getImm() ?
                       (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR) :
                      (isPPC64 ? PPC::BDZLR8  : PPC::BDZLR)));
-    } else {
+    } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
       MI->setDesc(get(PPC::BCLR));
+      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+        .addReg(Pred[1].getReg());
+    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
+      MI->setDesc(get(PPC::BCLRn));
+      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+        .addReg(Pred[1].getReg());
+    } else {
+      MI->setDesc(get(PPC::BCCLR));
       MachineInstrBuilder(*MI->getParent()->getParent(), MI)
         .addImm(Pred[0].getImm())
         .addReg(Pred[1].getReg());
@@ -941,10 +1325,26 @@ bool PPCInstrInfo::PredicateInstruction(
     return true;
   } else if (OpC == PPC::B) {
     if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
-      bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
+      bool isPPC64 = Subtarget.isPPC64();
       MI->setDesc(get(Pred[0].getImm() ?
                       (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                       (isPPC64 ? PPC::BDZ8  : PPC::BDZ)));
+    } else if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
+      MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
+      MI->RemoveOperand(0);
+
+      MI->setDesc(get(PPC::BC));
+      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+        .addReg(Pred[1].getReg())
+        .addMBB(MBB);
+    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
+      MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
+      MI->RemoveOperand(0);
+
+      MI->setDesc(get(PPC::BCn));
+      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+        .addReg(Pred[1].getReg())
+        .addMBB(MBB);
     } else {
       MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
       MI->RemoveOperand(0);
@@ -963,9 +1363,24 @@ bool PPCInstrInfo::PredicateInstruction(
       llvm_unreachable("Cannot predicate bctr[l] on the ctr register");
 
     bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8;
-    bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
-    MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) :
-                              (setLR ? PPC::BCCTRL  : PPC::BCCTR)));
+    bool isPPC64 = Subtarget.isPPC64();
+
+    if (Pred[0].getImm() == PPC::PRED_BIT_SET) {
+      MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) :
+                                (setLR ? PPC::BCCTRL  : PPC::BCCTR)));
+      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+        .addReg(Pred[1].getReg());
+      return true;
+    } else if (Pred[0].getImm() == PPC::PRED_BIT_UNSET) {
+      MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n) :
+                                (setLR ? PPC::BCCTRLn  : PPC::BCCTRn)));
+      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
+        .addReg(Pred[1].getReg());
+      return true;
+    }
+
+    MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8) :
+                              (setLR ? PPC::BCCCTRL  : PPC::BCCCTR)));
     MachineInstrBuilder(*MI->getParent()->getParent(), MI)
       .addImm(Pred[0].getImm())
       .addReg(Pred[1].getReg());
@@ -975,9 +1390,8 @@ bool PPCInstrInfo::PredicateInstruction(
   return false;
 }
 
-bool PPCInstrInfo::SubsumesPredicate(
-                     const SmallVectorImpl<MachineOperand> &Pred1,
-                     const SmallVectorImpl<MachineOperand> &Pred2) const {
+bool PPCInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+                                     ArrayRef<MachineOperand> Pred2) const {
   assert(Pred1.size() == 2 && "Invalid PPC first predicate");
   assert(Pred2.size() == 2 && "Invalid PPC second predicate");
 
@@ -986,6 +1400,10 @@ bool PPCInstrInfo::SubsumesPredicate(
   if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
     return false;
 
+  // P1 can only subsume P2 if they test the same condition register.
+  if (Pred1[1].getReg() != Pred2[1].getReg())
+    return false;
+
   PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm();
   PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm();
 
@@ -1046,6 +1464,7 @@ bool PPCInstrInfo::isPredicable(MachineInstr *MI) const {
     return false;
   case PPC::B:
   case PPC::BLR:
+  case PPC::BLR8:
   case PPC::BCTR:
   case PPC::BCTR8:
   case PPC::BCTRL:
@@ -1054,174 +1473,356 @@ bool PPCInstrInfo::isPredicable(MachineInstr *MI) const {
   }
 }
 
-/// GetInstSize - Return the number of bytes of code the specified
-/// instruction may be.  This returns the maximum number of bytes.
-///
-unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
-  switch (MI->getOpcode()) {
-  case PPC::INLINEASM: {       // Inline Asm: Variable size.
-    const MachineFunction *MF = MI->getParent()->getParent();
-    const char *AsmStr = MI->getOperand(0).getSymbolName();
-    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
-  }
-  case PPC::PROLOG_LABEL:
-  case PPC::EH_LABEL:
-  case PPC::GC_LABEL:
-  case PPC::DBG_VALUE:
-    return 0;
-  case PPC::BL8_NOP:
-  case PPC::BLA8_NOP:
-    return 8;
-  default:
-    return 4; // PowerPC instructions are all 4 bytes
+bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
+                                  unsigned &SrcReg, unsigned &SrcReg2,
+                                  int &Mask, int &Value) const {
+  unsigned Opc = MI->getOpcode();
+
+  switch (Opc) {
+  default: return false;
+  case PPC::CMPWI:
+  case PPC::CMPLWI:
+  case PPC::CMPDI:
+  case PPC::CMPLDI:
+    SrcReg = MI->getOperand(1).getReg();
+    SrcReg2 = 0;
+    Value = MI->getOperand(2).getImm();
+    Mask = 0xFFFF;
+    return true;
+  case PPC::CMPW:
+  case PPC::CMPLW:
+  case PPC::CMPD:
+  case PPC::CMPLD:
+  case PPC::FCMPUS:
+  case PPC::FCMPUD:
+    SrcReg = MI->getOperand(1).getReg();
+    SrcReg2 = MI->getOperand(2).getReg();
+    return true;
   }
 }
 
-#undef DEBUG_TYPE
-#define DEBUG_TYPE "ppc-early-ret"
-STATISTIC(NumBCLR, "Number of early conditional returns");
-STATISTIC(NumBLR,  "Number of early returns");
+bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
+                                        unsigned SrcReg, unsigned SrcReg2,
+                                        int Mask, int Value,
+                                        const MachineRegisterInfo *MRI) const {
+  if (DisableCmpOpt)
+    return false;
+
+  int OpC = CmpInstr->getOpcode();
+  unsigned CRReg = CmpInstr->getOperand(0).getReg();
 
-namespace llvm {
-  void initializePPCEarlyReturnPass(PassRegistry&);
-}
+  // FP record forms set CR1 based on the exception status bits, not a
+  // comparison with zero.
+  if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
+    return false;
 
-namespace {
-  // PPCEarlyReturn pass - For simple functions without epilogue code, move
-  // returns up, and create conditional returns, to avoid unnecessary
-  // branch-to-blr sequences.
-  struct PPCEarlyReturn : public MachineFunctionPass {
-    static char ID;
-    PPCEarlyReturn() : MachineFunctionPass(ID) {
-      initializePPCEarlyReturnPass(*PassRegistry::getPassRegistry());
+  // The record forms set the condition register based on a signed comparison
+  // with zero (so says the ISA manual). This is not as straightforward as it
+  // seems, however, because this is always a 64-bit comparison on PPC64, even
+  // for instructions that are 32-bit in nature (like slw for example).
+  // So, on PPC32, for unsigned comparisons, we can use the record forms only
+  // for equality checks (as those don't depend on the sign). On PPC64,
+  // we are restricted to equality for unsigned 64-bit comparisons and for
+  // signed 32-bit comparisons the applicability is more restricted.
+  bool isPPC64 = Subtarget.isPPC64();
+  bool is32BitSignedCompare   = OpC == PPC::CMPWI  || OpC == PPC::CMPW;
+  bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
+  bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
+
+  // Get the unique definition of SrcReg.
+  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
+  if (!MI) return false;
+  int MIOpC = MI->getOpcode();
+
+  bool equalityOnly = false;
+  bool noSub = false;
+  if (isPPC64) {
+    if (is32BitSignedCompare) {
+      // We can perform this optimization only if MI is sign-extending.
+      if (MIOpC == PPC::SRAW  || MIOpC == PPC::SRAWo ||
+          MIOpC == PPC::SRAWI || MIOpC == PPC::SRAWIo ||
+          MIOpC == PPC::EXTSB || MIOpC == PPC::EXTSBo ||
+          MIOpC == PPC::EXTSH || MIOpC == PPC::EXTSHo ||
+          MIOpC == PPC::EXTSW || MIOpC == PPC::EXTSWo) {
+        noSub = true;
+      } else
+        return false;
+    } else if (is32BitUnsignedCompare) {
+      // We can perform this optimization, equality only, if MI is
+      // zero-extending.
+      if (MIOpC == PPC::CNTLZW || MIOpC == PPC::CNTLZWo ||
+          MIOpC == PPC::SLW    || MIOpC == PPC::SLWo ||
+          MIOpC == PPC::SRW    || MIOpC == PPC::SRWo) {
+        noSub = true;
+        equalityOnly = true;
+      } else
+        return false;
+    } else
+      equalityOnly = is64BitUnsignedCompare;
+  } else
+    equalityOnly = is32BitUnsignedCompare;
+
+  if (equalityOnly) {
+    // We need to check the uses of the condition register in order to reject
+    // non-equality comparisons.
+    for (MachineRegisterInfo::use_instr_iterator I = MRI->use_instr_begin(CRReg),
+         IE = MRI->use_instr_end(); I != IE; ++I) {
+      MachineInstr *UseMI = &*I;
+      if (UseMI->getOpcode() == PPC::BCC) {
+        unsigned Pred = UseMI->getOperand(0).getImm();
+        if (Pred != PPC::PRED_EQ && Pred != PPC::PRED_NE)
+          return false;
+      } else if (UseMI->getOpcode() == PPC::ISEL ||
+                 UseMI->getOpcode() == PPC::ISEL8) {
+        unsigned SubIdx = UseMI->getOperand(3).getSubReg();
+        if (SubIdx != PPC::sub_eq)
+          return false;
+      } else
+        return false;
+    }
+  }
-    }
-
-    const PPCTargetMachine *TM;
-    const PPCInstrInfo *TII;
-
-protected:
-    bool processBlock(MachineBasicBlock &ReturnMBB) {
-      bool Changed = false;
-
-      MachineBasicBlock::iterator I = ReturnMBB.begin();
-      I = ReturnMBB.SkipPHIsAndLabels(I);
-
-      // The block must be essentially empty except for the blr.
-      if (I == ReturnMBB.end() || I->getOpcode() != PPC::BLR ||
-          I != ReturnMBB.getLastNonDebugInstr())
-        return Changed;
-
-      SmallVector<MachineBasicBlock*, 8> PredToRemove;
-      for (MachineBasicBlock::pred_iterator PI = ReturnMBB.pred_begin(),
-           PIE = ReturnMBB.pred_end(); PI != PIE; ++PI) {
-        bool OtherReference = false, BlockChanged = false;
-        for (MachineBasicBlock::iterator J = (*PI)->getLastNonDebugInstr();;) {
-          if (J->getOpcode() == PPC::B) {
-            if (J->getOperand(0).getMBB() == &ReturnMBB) {
-              // This is an unconditional branch to the return. Replace the
-              // branch with a blr.
-              BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BLR));
-              MachineBasicBlock::iterator K = J--;
-              K->eraseFromParent();
-              BlockChanged = true;
-              ++NumBLR;
-              continue;
-            }
-          } else if (J->getOpcode() == PPC::BCC) {
-            if (J->getOperand(2).getMBB() == &ReturnMBB) {
-              // This is a conditional branch to the return. Replace the branch
-              // with a bclr.
-              BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCLR))
-                .addImm(J->getOperand(0).getImm())
-                .addReg(J->getOperand(1).getReg());
-              MachineBasicBlock::iterator K = J--;
-              K->eraseFromParent();
-              BlockChanged = true;
-              ++NumBCLR;
-              continue;
-            }
-          } else if (J->isBranch()) {
-            if (J->isIndirectBranch()) {
-              if (ReturnMBB.hasAddressTaken())
-                OtherReference = true;
-            } else
-              for (unsigned i = 0; i < J->getNumOperands(); ++i)
-                if (J->getOperand(i).isMBB() &&
-                    J->getOperand(i).getMBB() == &ReturnMBB)
-                  OtherReference = true;
-          } else if (!J->isTerminator() && !J->isDebugValue())
-            break;
-
-          if (J == (*PI)->begin())
-            break;
-
-          --J;
-        }
-
-        if ((*PI)->canFallThrough() && (*PI)->isLayoutSuccessor(&ReturnMBB))
-          OtherReference = true;
+  MachineBasicBlock::iterator I = CmpInstr;
+
+  // Scan forward to find the first use of the compare.
+  for (MachineBasicBlock::iterator EL = CmpInstr->getParent()->end();
+       I != EL; ++I) {
+    bool FoundUse = false;
+    for (MachineRegisterInfo::use_instr_iterator J = MRI->use_instr_begin(CRReg),
+         JE = MRI->use_instr_end(); J != JE; ++J)
+      if (&*J == &*I) {
+        FoundUse = true;
+        break;
+      }
 
+    if (FoundUse)
+      break;
+  }
 
-        // Predecessors are stored in a vector and can't be removed here.
-        if (!OtherReference && BlockChanged) {
-          PredToRemove.push_back(*PI);
-        }
-
-        if (BlockChanged)
-          Changed = true;
-      }
+  // There are two possible candidates which can be changed to set CR[01].
+  // One is MI, the other is a SUB instruction.
+  // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
+  MachineInstr *Sub = nullptr;
+  if (SrcReg2 != 0)
+    // MI is not a candidate for CMPrr.
+    MI = nullptr;
+  // FIXME: Conservatively refuse to convert an instruction which isn't in the
+  // same BB as the comparison. This is to allow the check below to avoid calls
+  // (and other explicit clobbers); instead we should really check for these
+  // more explicitly (in at least a few predecessors).
+  else if (MI->getParent() != CmpInstr->getParent() || Value != 0) {
+    // PPC does not have a record-form SUBri.
+    return false;
+  }
 
-      for (unsigned i = 0, ie = PredToRemove.size(); i != ie; ++i)
-        PredToRemove[i]->removeSuccessor(&ReturnMBB);
-
-      if (Changed && !ReturnMBB.hasAddressTaken()) {
-        // We now might be able to merge this blr-only block into its
-        // by-layout predecessor.
-        if (ReturnMBB.pred_size() == 1 &&
-            (*ReturnMBB.pred_begin())->isLayoutSuccessor(&ReturnMBB)) {
-          // Move the blr into the preceding block.
-          MachineBasicBlock &PrevMBB = **ReturnMBB.pred_begin();
-          PrevMBB.splice(PrevMBB.end(), &ReturnMBB, I);
-          PrevMBB.removeSuccessor(&ReturnMBB);
-        }
+  // Search for Sub.
+  const TargetRegisterInfo *TRI = &getRegisterInfo();
+  --I;
 
-        if (ReturnMBB.pred_empty())
-          ReturnMBB.eraseFromParent();
-      }
+  // Get ready to iterate backward from CmpInstr.
+  MachineBasicBlock::iterator E = MI,
+                              B = CmpInstr->getParent()->begin();
 
+  for (; I != E && !noSub; --I) {
+    const MachineInstr &Instr = *I;
+    unsigned IOpC = Instr.getOpcode();
 
+    if (&*I != CmpInstr && (
+        Instr.modifiesRegister(PPC::CR0, TRI) ||
+        Instr.readsRegister(PPC::CR0, TRI)))
+      // This instruction modifies or uses the record condition register after
+      // the one we want to change. While we could do this transformation, it
+      // would likely not be profitable. This transformation removes one
+      // instruction, and so even forcing RA to generate one move probably
+      // makes it unprofitable.
+      return false;
-      return Changed;
-    }
+    // Check whether CmpInstr can be made redundant by the current instruction.
+    if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
+         OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
+        (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
+        ((Instr.getOperand(1).getReg() == SrcReg &&
+          Instr.getOperand(2).getReg() == SrcReg2) ||
+         (Instr.getOperand(1).getReg() == SrcReg2 &&
+          Instr.getOperand(2).getReg() == SrcReg))) {
+      Sub = &*I;
+      break;
+    }
 
-public:
-    virtual bool runOnMachineFunction(MachineFunction &MF) {
-      TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
-      TII = TM->getInstrInfo();
+    if (I == B)
+      // The 'and' is below the comparison instruction.
+      return false;
+  }
 
-      bool Changed = false;
+  // Return false if no candidates exist.
+  if (!MI && !Sub)
+    return false;
 
-      // If the function does not have at least two blocks, then there is
-      // nothing to do.
-      if (MF.size() < 2)
-        return Changed;
+  // The single candidate is called MI.
+  if (!MI) MI = Sub;
 
-      for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
-        MachineBasicBlock &B = *I++;
-        if (processBlock(B))
-          Changed = true;
-      }
+  int NewOpC = -1;
+  MIOpC = MI->getOpcode();
+  if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDIo8)
+    NewOpC = MIOpC;
+  else {
+    NewOpC = PPC::getRecordFormOpcode(MIOpC);
+    if (NewOpC == -1 && PPC::getNonRecordFormOpcode(MIOpC) != -1)
+      NewOpC = MIOpC;
+  }
 
-      return Changed;
-    }
+  // FIXME: On the non-embedded POWER architectures, only some of the record
+  // forms are fast, and we should use only the fast ones.
 
+  // The defining instruction has a record form (or is already a record
+  // form). It is possible, however, that we'll need to reverse the condition
+  // code of the users.
+  if (NewOpC == -1)
+    return false;
 
+  SmallVector<std::pair<MachineOperand*, PPC::Predicate>, 4> PredsToUpdate;
+  SmallVector<std::pair<MachineOperand*, unsigned>, 4> SubRegsToUpdate;
 
+  // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on CMP
+  // needs to be updated to be based on SUB.  Push the condition code
+  // operands to OperandsToUpdate.  If it is safe to remove CmpInstr, the
+  // condition code of these operands will be modified.
+  bool ShouldSwap = false;
+  if (Sub) {
+    ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
+                 Sub->getOperand(2).getReg() == SrcReg;
+
+    // The operands to subf are the opposite of sub, so only in the fixed-point
+    // case, invert the order.
+    ShouldSwap = !ShouldSwap;
+  }
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
-      MachineFunctionPass::getAnalysisUsage(AU);
-    }
-  };
-}
+  if (ShouldSwap)
+    for (MachineRegisterInfo::use_instr_iterator
+         I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
+         I != IE; ++I) {
+      MachineInstr *UseMI = &*I;
+      if (UseMI->getOpcode() == PPC::BCC) {
+        PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(0).getImm();
+        assert((!equalityOnly ||
+                Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE) &&
+               "Invalid predicate for equality-only optimization");
+        PredsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(0)),
+                                PPC::getSwappedPredicate(Pred)));
+      } else if (UseMI->getOpcode() == PPC::ISEL ||
+                 UseMI->getOpcode() == PPC::ISEL8) {
+        unsigned NewSubReg = UseMI->getOperand(3).getSubReg();
+        assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
+               "Invalid CR bit for equality-only optimization");
+
+        if (NewSubReg == PPC::sub_lt)
+          NewSubReg = PPC::sub_gt;
+        else if (NewSubReg == PPC::sub_gt)
+          NewSubReg = PPC::sub_lt;
+
+        SubRegsToUpdate.push_back(std::make_pair(&(UseMI->getOperand(3)),
+                                                 NewSubReg));
+      } else // We need to abort on a user we don't understand.
+        return false;
+    }
+
+  // Create a new virtual register to hold the value of the CR set by the
+  // record-form instruction.  If the instruction was not previously in
+  // record form, then set the kill flag on the CR.
+  CmpInstr->eraseFromParent();
+
+  MachineBasicBlock::iterator MII = MI;
+  BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
+          get(TargetOpcode::COPY), CRReg)
+    .addReg(PPC::CR0, MIOpC != NewOpC ? RegState::Kill : 0);
+
+  if (MIOpC != NewOpC) {
+    // We need to be careful here: we're replacing one instruction with
+    // another, and we need to make sure that we get all of the right
+    // implicit uses and defs. On the other hand, the caller may be holding
+    // an iterator to this instruction, and so we can't delete it (this is
+    // specifically the case if this is the instruction directly after the
+    // compare).
+
+    const MCInstrDesc &NewDesc = get(NewOpC);
+    MI->setDesc(NewDesc);
+
+    if (NewDesc.ImplicitDefs)
+      for (const MCPhysReg *ImpDefs = NewDesc.getImplicitDefs();
+           *ImpDefs; ++ImpDefs)
+        if (!MI->definesRegister(*ImpDefs))
+          MI->addOperand(*MI->getParent()->getParent(),
+                         MachineOperand::CreateReg(*ImpDefs, true, true));
+    if (NewDesc.ImplicitUses)
+      for (const MCPhysReg *ImpUses = NewDesc.getImplicitUses();
+           *ImpUses; ++ImpUses)
+        if (!MI->readsRegister(*ImpUses))
+          MI->addOperand(*MI->getParent()->getParent(),
+                         MachineOperand::CreateReg(*ImpUses, false, true));
+  }
+
+  // Modify the condition code of operands in OperandsToUpdate.
+  // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
+  // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
+  for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
+    PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);
+
+  for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
+    SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
+
+  return true;
+}
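// A minimal sketch (not from this patch) of the operand-swap handled above:
// when the compare's operands are reversed relative to the subtract feeding
// it, each user's predicate must be replaced by its swapped form, which is
// what PPC::getSwappedPredicate provides. Swapping the operands of a
// comparison maps LT<->GT and LE<->GE while EQ and NE are unchanged:
//
//   enum Pred { EQ, NE, LT, GT, LE, GE };
//   Pred swappedPredicate(Pred P) {
//     switch (P) {
//     case LT: return GT;  // a < b  ==  b > a
//     case GT: return LT;  // a > b  ==  b < a
//     case LE: return GE;  // a <= b ==  b >= a
//     case GE: return LE;  // a >= b ==  b <= a
//     default: return P;   // EQ and NE are symmetric
//     }
//   }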
+
+/// GetInstSize - Return the number of bytes of code the specified
+/// instruction may be.  This returns the maximum number of bytes.
+///
+unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
+  unsigned Opcode = MI->getOpcode();
+
+  if (Opcode == PPC::INLINEASM) {
+    const MachineFunction *MF = MI->getParent()->getParent();
+    const char *AsmStr = MI->getOperand(0).getSymbolName();
+    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
+  } else if (Opcode == TargetOpcode::STACKMAP) {
+    return MI->getOperand(1).getImm();
+  } else if (Opcode == TargetOpcode::PATCHPOINT) {
+    PatchPointOpers Opers(MI);
+    return Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
+  } else {
+    const MCInstrDesc &Desc = get(Opcode);
+    return Desc.getSize();
+  }
+}
 
-INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE,
-                "PowerPC Early-Return Creation", false, false)
+std::pair<unsigned, unsigned>
+PPCInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
+  const unsigned Mask = PPCII::MO_ACCESS_MASK;
+  return std::make_pair(TF & Mask, TF & ~Mask);
+}
 
-char PPCEarlyReturn::ID = 0;
-FunctionPass*
-llvm::createPPCEarlyReturnPass() { return new PPCEarlyReturn(); }
+ArrayRef<std::pair<unsigned, const char *>>
+PPCInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
+  using namespace PPCII;
+  static const std::pair<unsigned, const char *> TargetFlags[] = {
+      {MO_LO, "ppc-lo"},
+      {MO_HA, "ppc-ha"},
+      {MO_TPREL_LO, "ppc-tprel-lo"},
+      {MO_TPREL_HA, "ppc-tprel-ha"},
+      {MO_DTPREL_LO, "ppc-dtprel-lo"},
+      {MO_TLSLD_LO, "ppc-tlsld-lo"},
+      {MO_TOC_LO, "ppc-toc-lo"},
+      {MO_TLS, "ppc-tls"}};
+  return makeArrayRef(TargetFlags);
+}
+
+ArrayRef<std::pair<unsigned, const char *>>
+PPCInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
+  using namespace PPCII;
+  static const std::pair<unsigned, const char *> TargetFlags[] = {
+      {MO_PLT_OR_STUB, "ppc-plt-or-stub"},
+      {MO_PIC_FLAG, "ppc-pic"},
+      {MO_NLP_FLAG, "ppc-nlp"},
+      {MO_NLP_HIDDEN_FLAG, "ppc-nlp-hidden"}};
+  return makeArrayRef(TargetFlags);
+}
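// A standalone sketch (not from this patch) of the rotate amount computed in
// copyPhysReg above when copying a CR bit into a GPR: after MFOCRF, condition
// field n occupies bits 4*n .. 4*n+3 counting from the most-significant end,
// with LT, GT, EQ, UN in that order, and getCRBitValue returns 3, 2, 1, 0 for
// those bits respectively. The left-rotate that brings the wanted bit into
// the least-significant position is therefore:
//
//   unsigned crBitRotateAmount(unsigned CRField /* 0..7 */,
//                              unsigned BitValue /* 3 (LT) .. 0 (UN) */) {
//     return CRField * 4 + (4 - BitValue); // e.g. CR0LT: 0*4 + (4-3) == 1
//   }
//
// The following rlwinm with MB = ME = 31 then masks the result down to that
// single bit.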