X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FPowerPC%2FPPCInstrInfo.cpp;h=79c399b71ae865d7b3be0a675b7db496e9e797fc;hb=f606a6ed992d5e4e2877419b51e2a9b540b5e3f0;hp=1f98fe62126b0cfa28f433f00c8234873d26086b;hpb=7e77dabbd08f5822a8b95fe98ee1a48072aa0f1c;p=oota-llvm.git

diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index 1f98fe62126..79c399b71ae 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -20,24 +20,32 @@
 #include "PPCTargetMachine.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/StackMaps.h"
 #include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCInst.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/TargetRegistry.h"
 #include "llvm/Support/raw_ostream.h"
 
+using namespace llvm;
+
+#define DEBUG_TYPE "ppc-instr-info"
+
 #define GET_INSTRMAP_INFO
 #define GET_INSTRINFO_CTOR_DTOR
 #include "PPCGenInstrInfo.inc"
 
-using namespace llvm;
-
 static cl::
 opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
             cl::desc("Disable analysis for CTR loops"));
@@ -45,49 +53,89 @@ opt<bool> DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
 static cl::opt<bool> DisableCmpOpt("disable-ppc-cmp-opt",
         cl::desc("Disable compare instruction optimization"), cl::Hidden);
 
+static cl::opt<bool> VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy",
+cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
+cl::Hidden);
+
+static cl::opt<bool>
+UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden,
+  cl::desc("Use the old (incorrect) instruction latency calculation"));
+
 // Pin the vtable to this file.
 void PPCInstrInfo::anchor() {}
 
-PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
-  : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
-    TM(tm), RI(*TM.getSubtargetImpl()) {}
+PPCInstrInfo::PPCInstrInfo(PPCSubtarget &STI)
+    : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
+      Subtarget(STI), RI(STI.getTargetMachine()) {}
 
 /// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
 /// this target when scheduling the DAG.
-ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer(
-  const TargetMachine *TM,
-  const ScheduleDAG *DAG) const {
-  unsigned Directive = TM->getSubtarget<PPCSubtarget>().getDarwinDirective();
+ScheduleHazardRecognizer *
+PPCInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+                                           const ScheduleDAG *DAG) const {
+  unsigned Directive =
+      static_cast<const PPCSubtarget *>(STI)->getDarwinDirective();
   if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
       Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
-    const InstrItineraryData *II = TM->getInstrItineraryData();
+    const InstrItineraryData *II =
+        static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
     return new ScoreboardHazardRecognizer(II, DAG);
   }
 
-  return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
+  return TargetInstrInfo::CreateTargetHazardRecognizer(STI, DAG);
 }
 
 /// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
 /// to use for this target when scheduling the DAG.
-ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
-  const InstrItineraryData *II,
-  const ScheduleDAG *DAG) const {
-  unsigned Directive = TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+ScheduleHazardRecognizer *
+PPCInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+                                                 const ScheduleDAG *DAG) const {
+  unsigned Directive =
+      DAG->MF.getSubtarget<PPCSubtarget>().getDarwinDirective();
 
-  if (Directive == PPC::DIR_PWR7)
+  if (Directive == PPC::DIR_PWR7 || Directive == PPC::DIR_PWR8)
     return new PPCDispatchGroupSBHazardRecognizer(II, DAG);
 
   // Most subtargets use a PPC970 recognizer.
   if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
       Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
-    assert(TM.getInstrInfo() && "No InstrInfo?");
+    assert(DAG->TII && "No InstrInfo?");
 
-    return new PPCHazardRecognizer970(TM);
+    return new PPCHazardRecognizer970(*DAG);
   }
 
   return new ScoreboardHazardRecognizer(II, DAG);
 }
 
+unsigned PPCInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+                                       const MachineInstr *MI,
+                                       unsigned *PredCost) const {
+  if (!ItinData || UseOldLatencyCalc)
+    return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);
+
+  // The default implementation of getInstrLatency calls getStageLatency, but
+  // getStageLatency does not do the right thing for us. While we have
+  // itinerary, most cores are fully pipelined, and so the itineraries only
+  // express the first part of the pipeline, not every stage. Instead, we need
+  // to use the listed output operand cycle number (using operand 0 here, which
+  // is an output).
+
+  unsigned Latency = 1;
+  unsigned DefClass = MI->getDesc().getSchedClass();
+  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
+      continue;
+
+    int Cycle = ItinData->getOperandCycle(DefClass, i);
+    if (Cycle < 0)
+      continue;
+
+    Latency = std::max(Latency, (unsigned) Cycle);
+  }
+
+  return Latency;
+}
 
 int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                     const MachineInstr *DefMI, unsigned DefIdx,
@@ -96,12 +144,14 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
   int Latency = PPCGenInstrInfo::getOperandLatency(ItinData, DefMI, DefIdx,
                                                    UseMI, UseIdx);
 
+  if (!DefMI->getParent())
+    return Latency;
+
   const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
   unsigned Reg = DefMO.getReg();
 
-  const TargetRegisterInfo *TRI = &getRegisterInfo();
   bool IsRegCR;
-  if (TRI->isVirtualRegister(Reg)) {
+  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
     const MachineRegisterInfo *MRI =
       &DefMI->getParent()->getParent()->getRegInfo();
     IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
@@ -117,7 +167,7 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
 
   // On some cores, there is an additional delay between writing to a condition
   // register, and using it from a branch.
-  unsigned Directive = TM.getSubtarget<PPCSubtarget>().getDarwinDirective();
+  unsigned Directive = Subtarget.getDarwinDirective();
   switch (Directive) {
   default: break;
   case PPC::DIR_7400:
@@ -130,6 +180,7 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
   case PPC::DIR_PWR6:
   case PPC::DIR_PWR6X:
   case PPC::DIR_PWR7:
+  case PPC::DIR_PWR8:
     Latency += 2;
     break;
   }
@@ -138,6 +189,265 @@ int PPCInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
   return Latency;
 }
 
+static bool hasVirtualRegDefsInBasicBlock(const MachineInstr &Inst,
+                                          const MachineBasicBlock *MBB) {
+  const MachineOperand &Op1 = Inst.getOperand(1);
+  const MachineOperand &Op2 = Inst.getOperand(2);
+  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+
+  // We need virtual register definitions.
+  MachineInstr *MI1 = nullptr;
+  MachineInstr *MI2 = nullptr;
+  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
+    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
+  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
+    MI2 = MRI.getUniqueVRegDef(Op2.getReg());
+
+  // And they need to be in the trace (otherwise, they won't have a depth).
+  if (MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB)
+    return true;
+
+  return false;
+}
+
+static bool hasReassocSibling(const MachineInstr &Inst, bool &Commuted) {
+  const MachineBasicBlock *MBB = Inst.getParent();
+  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
+  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
+  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
+  unsigned AssocOpcode = Inst.getOpcode();
+
+  // If only one operand has the same opcode and it's the second source operand,
+  // the operands must be commuted.
+  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
+  if (Commuted)
+    std::swap(MI1, MI2);
+
+  // 1. The previous instruction must be the same type as Inst.
+  // 2. The previous instruction must have virtual register definitions for its
+  //    operands in the same basic block as Inst.
+  // 3. The previous instruction's result must only be used by Inst.
+ if (MI1->getOpcode() == AssocOpcode && + hasVirtualRegDefsInBasicBlock(*MI1, MBB) && + MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg())) + return true; + + return false; +} + +// This function does not list all associative and commutative operations, but +// only those worth feeding through the machine combiner in an attempt to +// reduce the critical path. Mostly, this means floating-point operations, +// because they have high latencies (compared to other operations, such and +// and/or, which are also associative and commutative, but have low latencies). +// +// The concept is that these operations can benefit from this kind of +// transformation: +// +// A = ? op ? +// B = A op X +// C = B op Y +// --> +// A = ? op ? +// B = X op Y +// C = A op B +// +// breaking the dependency between A and B, allowing them to be executed in +// parallel (or back-to-back in a pipeline) instead of depending on each other. +static bool isAssociativeAndCommutative(unsigned Opcode) { + switch (Opcode) { + // FP Add: + case PPC::FADD: + case PPC::FADDS: + // FP Multiply: + case PPC::FMUL: + case PPC::FMULS: + // Altivec Add: + case PPC::VADDFP: + // VSX Add: + case PPC::XSADDDP: + case PPC::XVADDDP: + case PPC::XVADDSP: + case PPC::XSADDSP: + // VSX Multiply: + case PPC::XSMULDP: + case PPC::XVMULDP: + case PPC::XVMULSP: + case PPC::XSMULSP: + // QPX Add: + case PPC::QVFADD: + case PPC::QVFADDS: + case PPC::QVFADDSs: + // QPX Multiply: + case PPC::QVFMUL: + case PPC::QVFMULS: + case PPC::QVFMULSs: + return true; + default: + return false; + } +} + +/// Return true if the input instruction is part of a chain of dependent ops +/// that are suitable for reassociation, otherwise return false. +/// If the instruction's operands must be commuted to have a previous +/// instruction of the same type define the first source operand, Commuted will +/// be set to true. +static bool isReassocCandidate(const MachineInstr &Inst, bool &Commuted) { + // 1. The operation must be associative and commutative. + // 2. The instruction must have virtual register definitions for its + // operands in the same basic block. + // 3. The instruction must have a reassociable sibling. + if (isAssociativeAndCommutative(Inst.getOpcode()) && + hasVirtualRegDefsInBasicBlock(Inst, Inst.getParent()) && + hasReassocSibling(Inst, Commuted)) + return true; + + return false; +} + +bool PPCInstrInfo::getMachineCombinerPatterns(MachineInstr &Root, + SmallVectorImpl &Patterns) const { + // Using the machine combiner in this way is potentially expensive, so + // restrict to when aggressive optimizations are desired. + if (Subtarget.getTargetMachine().getOptLevel() != CodeGenOpt::Aggressive) + return false; + + // FP reassociation is only legal when we don't need strict IEEE semantics. + if (!Root.getParent()->getParent()->getTarget().Options.UnsafeFPMath) + return false; + + // Look for this reassociation pattern: + // B = A op X (Prev) + // C = B op Y (Root) + + // FIXME: We should also match FMA operations here, where we consider the + // 'part' of the FMA, either the addition or the multiplication, paired with + // an actual addition or multiplication. + + bool Commute; + if (isReassocCandidate(Root, Commute)) { + // We found a sequence of instructions that may be suitable for a + // reassociation of operands to increase ILP. Specify each commutation + // possibility for the Prev instruction in the sequence and let the + // machine combiner decide if changing the operands is worthwhile. 
+ if (Commute) { + Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_YB); + Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_YB); + } else { + Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_BY); + Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_BY); + } + return true; + } + + return false; +} + +/// Attempt the following reassociation to reduce critical path length: +/// B = A op X (Prev) +/// C = B op Y (Root) +/// ===> +/// B = X op Y +/// C = A op B +static void reassociateOps(MachineInstr &Root, MachineInstr &Prev, + MachineCombinerPattern::MC_PATTERN Pattern, + SmallVectorImpl &InsInstrs, + SmallVectorImpl &DelInstrs, + DenseMap &InstrIdxForVirtReg) { + MachineFunction *MF = Root.getParent()->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); + const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); + const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI); + + // This array encodes the operand index for each parameter because the + // operands may be commuted. Each row corresponds to a pattern value, + // and each column specifies the index of A, B, X, Y. + unsigned OpIdx[4][4] = { + { 1, 1, 2, 2 }, + { 1, 2, 2, 1 }, + { 2, 1, 1, 2 }, + { 2, 2, 1, 1 } + }; + + MachineOperand &OpA = Prev.getOperand(OpIdx[Pattern][0]); + MachineOperand &OpB = Root.getOperand(OpIdx[Pattern][1]); + MachineOperand &OpX = Prev.getOperand(OpIdx[Pattern][2]); + MachineOperand &OpY = Root.getOperand(OpIdx[Pattern][3]); + MachineOperand &OpC = Root.getOperand(0); + + unsigned RegA = OpA.getReg(); + unsigned RegB = OpB.getReg(); + unsigned RegX = OpX.getReg(); + unsigned RegY = OpY.getReg(); + unsigned RegC = OpC.getReg(); + + if (TargetRegisterInfo::isVirtualRegister(RegA)) + MRI.constrainRegClass(RegA, RC); + if (TargetRegisterInfo::isVirtualRegister(RegB)) + MRI.constrainRegClass(RegB, RC); + if (TargetRegisterInfo::isVirtualRegister(RegX)) + MRI.constrainRegClass(RegX, RC); + if (TargetRegisterInfo::isVirtualRegister(RegY)) + MRI.constrainRegClass(RegY, RC); + if (TargetRegisterInfo::isVirtualRegister(RegC)) + MRI.constrainRegClass(RegC, RC); + + // Create a new virtual register for the result of (X op Y) instead of + // recycling RegB because the MachineCombiner's computation of the critical + // path requires a new register definition rather than an existing one. + unsigned NewVR = MRI.createVirtualRegister(RC); + InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0)); + + unsigned Opcode = Root.getOpcode(); + bool KillA = OpA.isKill(); + bool KillX = OpX.isKill(); + bool KillY = OpY.isKill(); + + // Create new instructions for insertion. + MachineInstrBuilder MIB1 = + BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR) + .addReg(RegX, getKillRegState(KillX)) + .addReg(RegY, getKillRegState(KillY)); + InsInstrs.push_back(MIB1); + + MachineInstrBuilder MIB2 = + BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC) + .addReg(RegA, getKillRegState(KillA)) + .addReg(NewVR, getKillRegState(true)); + InsInstrs.push_back(MIB2); + + // Record old instructions for deletion. 
+ DelInstrs.push_back(&Prev); + DelInstrs.push_back(&Root); +} + +void PPCInstrInfo::genAlternativeCodeSequence( + MachineInstr &Root, + MachineCombinerPattern::MC_PATTERN Pattern, + SmallVectorImpl &InsInstrs, + SmallVectorImpl &DelInstrs, + DenseMap &InstIdxForVirtReg) const { + MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo(); + + // Select the previous instruction in the sequence based on the input pattern. + MachineInstr *Prev = nullptr; + switch (Pattern) { + case MachineCombinerPattern::MC_REASSOC_AX_BY: + case MachineCombinerPattern::MC_REASSOC_XA_BY: + Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg()); + break; + case MachineCombinerPattern::MC_REASSOC_AX_YB: + case MachineCombinerPattern::MC_REASSOC_XA_YB: + Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg()); + } + assert(Prev && "Unknown pattern for machine combiner"); + + reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg); + return; +} + // Detect 32 -> 64-bit extensions where we may reuse the low sub-register. bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg, unsigned &DstReg, @@ -166,6 +476,9 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI, case PPC::RESTORE_CRBIT: case PPC::LVX: case PPC::LXVD2X: + case PPC::QVLFDX: + case PPC::QVLFSXs: + case PPC::QVLFDXb: case PPC::RESTORE_VRSAVE: // Check for the operands added by addFrameReference (the immediate is the // offset which defaults to 0). @@ -192,6 +505,9 @@ unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI, case PPC::SPILL_CRBIT: case PPC::STVX: case PPC::STXVD2X: + case PPC::QVSTFDX: + case PPC::QVSTFSXs: + case PPC::QVSTFDXb: case PPC::SPILL_VRSAVE: // Check for the operands added by addFrameReference (the immediate is the // offset which defaults to 0). @@ -213,14 +529,16 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { // Normal instructions can be commuted the obvious way. if (MI->getOpcode() != PPC::RLWIMI && - MI->getOpcode() != PPC::RLWIMIo && - MI->getOpcode() != PPC::RLWIMI8 && - MI->getOpcode() != PPC::RLWIMI8o) + MI->getOpcode() != PPC::RLWIMIo) return TargetInstrInfo::commuteInstruction(MI, NewMI); + // Note that RLWIMI can be commuted as a 32-bit instruction, but not as a + // 64-bit instruction (so we don't handle PPC::RLWIMI8 here), because + // changing the relative order of the mask operands might change what happens + // to the high-bits of the mask (and, thus, the result). // Cannot commute if it has a non-zero rotate count. if (MI->getOperand(3).getImm() != 0) - return 0; + return nullptr; // If we have a zero rotate count, we have: // M = mask(MB,ME) @@ -301,18 +619,24 @@ void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const { // This function is used for scheduling, and the nop wanted here is the type // that terminates dispatch groups on the POWER cores. - unsigned Directive = TM.getSubtarget().getDarwinDirective(); + unsigned Directive = Subtarget.getDarwinDirective(); unsigned Opcode; switch (Directive) { default: Opcode = PPC::NOP; break; case PPC::DIR_PWR6: Opcode = PPC::NOP_GT_PWR6; break; case PPC::DIR_PWR7: Opcode = PPC::NOP_GT_PWR7; break; + case PPC::DIR_PWR8: Opcode = PPC::NOP_GT_PWR7; break; /* FIXME: Update when P8 InstrScheduling model is ready */ } DebugLoc DL; BuildMI(MBB, MI, DL, get(Opcode)); } +/// getNoopForMachoTarget - Return the noop instruction to use for a noop. 
+void PPCInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const { + NopInst.setOpcode(PPC::NOP); +} + // Branch analysis. // Note: If the condition register is set to CTR or CTR8 then this is a // BDNZ (imm == 1) or BDZ (imm == 0) branch. @@ -320,18 +644,13 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl &Cond, bool AllowModify) const { - bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + bool isPPC64 = Subtarget.isPPC64(); // If the block has no terminators, it just falls into the block after it. - MachineBasicBlock::iterator I = MBB.end(); - if (I == MBB.begin()) + MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); + if (I == MBB.end()) return false; - --I; - while (I->isDebugValue()) { - if (I == MBB.begin()) - return false; - --I; - } + if (!isUnpredicatedTerminator(I)) return false; @@ -484,14 +803,10 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB, } unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { - MachineBasicBlock::iterator I = MBB.end(); - if (I == MBB.begin()) return 0; - --I; - while (I->isDebugValue()) { - if (I == MBB.begin()) - return 0; - --I; - } + MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr(); + if (I == MBB.end()) + return 0; + if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC && I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn && I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ && @@ -519,17 +834,17 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const { unsigned PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, - const SmallVectorImpl &Cond, + ArrayRef Cond, DebugLoc DL) const { // Shouldn't be a fall through. assert(TBB && "InsertBranch must not be told to insert a fallthrough"); assert((Cond.size() == 2 || Cond.size() == 0) && "PPC branch conditions have two components!"); - bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + bool isPPC64 = Subtarget.isPPC64(); // One-way branch. - if (FBB == 0) { + if (!FBB) { if (Cond.empty()) // Unconditional branch BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB); else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8) @@ -564,10 +879,10 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, // Select analysis. bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, - const SmallVectorImpl &Cond, + ArrayRef Cond, unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles, int &FalseCycles) const { - if (!TM.getSubtargetImpl()->hasISEL()) + if (!Subtarget.hasISEL()) return false; if (Cond.size() != 2) @@ -605,13 +920,12 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, DebugLoc dl, - unsigned DestReg, - const SmallVectorImpl &Cond, + unsigned DestReg, ArrayRef Cond, unsigned TrueReg, unsigned FalseReg) const { assert(Cond.size() == 2 && "PPC branch conditions have two components!"); - assert(TM.getSubtargetImpl()->hasISEL() && + assert(Subtarget.hasISEL() && "Cannot insert select on target without ISEL support"); // Get the register classes. 
@@ -668,51 +982,104 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB, .addReg(Cond[1].getReg(), 0, SubIdx); } +static unsigned getCRBitValue(unsigned CRBit) { + unsigned Ret = 4; + if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT || + CRBit == PPC::CR2LT || CRBit == PPC::CR3LT || + CRBit == PPC::CR4LT || CRBit == PPC::CR5LT || + CRBit == PPC::CR6LT || CRBit == PPC::CR7LT) + Ret = 3; + if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT || + CRBit == PPC::CR2GT || CRBit == PPC::CR3GT || + CRBit == PPC::CR4GT || CRBit == PPC::CR5GT || + CRBit == PPC::CR6GT || CRBit == PPC::CR7GT) + Ret = 2; + if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ || + CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ || + CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ || + CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ) + Ret = 1; + if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN || + CRBit == PPC::CR2UN || CRBit == PPC::CR3UN || + CRBit == PPC::CR4UN || CRBit == PPC::CR5UN || + CRBit == PPC::CR6UN || CRBit == PPC::CR7UN) + Ret = 0; + + assert(Ret != 4 && "Invalid CR bit register"); + return Ret; +} + void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const { // We can end up with self copies and similar things as a result of VSX copy - // legalization. Promote (or just ignore) them here. + // legalization. Promote them here. const TargetRegisterInfo *TRI = &getRegisterInfo(); if (PPC::F8RCRegClass.contains(DestReg) && - PPC::VSLRCRegClass.contains(SrcReg)) { + PPC::VSRCRegClass.contains(SrcReg)) { unsigned SuperReg = TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass); - if (SrcReg == SuperReg) - return; + if (VSXSelfCopyCrash && SrcReg == SuperReg) + llvm_unreachable("nop VSX copy"); DestReg = SuperReg; } else if (PPC::VRRCRegClass.contains(DestReg) && - PPC::VSHRCRegClass.contains(SrcReg)) { + PPC::VSRCRegClass.contains(SrcReg)) { unsigned SuperReg = TRI->getMatchingSuperReg(DestReg, PPC::sub_128, &PPC::VSRCRegClass); - if (SrcReg == SuperReg) - return; + if (VSXSelfCopyCrash && SrcReg == SuperReg) + llvm_unreachable("nop VSX copy"); DestReg = SuperReg; } else if (PPC::F8RCRegClass.contains(SrcReg) && - PPC::VSLRCRegClass.contains(DestReg)) { + PPC::VSRCRegClass.contains(DestReg)) { unsigned SuperReg = TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass); - if (DestReg == SuperReg) - return; + if (VSXSelfCopyCrash && DestReg == SuperReg) + llvm_unreachable("nop VSX copy"); SrcReg = SuperReg; } else if (PPC::VRRCRegClass.contains(SrcReg) && - PPC::VSHRCRegClass.contains(DestReg)) { + PPC::VSRCRegClass.contains(DestReg)) { unsigned SuperReg = TRI->getMatchingSuperReg(SrcReg, PPC::sub_128, &PPC::VSRCRegClass); - if (DestReg == SuperReg) - return; + if (VSXSelfCopyCrash && DestReg == SuperReg) + llvm_unreachable("nop VSX copy"); SrcReg = SuperReg; } + // Different class register copy + if (PPC::CRBITRCRegClass.contains(SrcReg) && + PPC::GPRCRegClass.contains(DestReg)) { + unsigned CRReg = getCRFromCRBit(SrcReg); + BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg) + .addReg(CRReg), getKillRegState(KillSrc); + // Rotate the CR bit in the CR fields to be the least significant bit and + // then mask with 0x1 (MB = ME = 31). 
+ BuildMI(MBB, I, DL, get(PPC::RLWINM), DestReg) + .addReg(DestReg, RegState::Kill) + .addImm(TRI->getEncodingValue(CRReg) * 4 + (4 - getCRBitValue(SrcReg))) + .addImm(31) + .addImm(31); + return; + } else if (PPC::CRRCRegClass.contains(SrcReg) && + PPC::G8RCRegClass.contains(DestReg)) { + BuildMI(MBB, I, DL, get(PPC::MFOCRF8), DestReg) + .addReg(SrcReg), getKillRegState(KillSrc); + return; + } else if (PPC::CRRCRegClass.contains(SrcReg) && + PPC::GPRCRegClass.contains(DestReg)) { + BuildMI(MBB, I, DL, get(PPC::MFOCRF), DestReg) + .addReg(SrcReg), getKillRegState(KillSrc); + return; + } + unsigned Opc; if (PPC::GPRCRegClass.contains(DestReg, SrcReg)) Opc = PPC::OR; @@ -734,6 +1101,15 @@ void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB, // copies are generated, they are close enough to some use that the // lower-latency form is preferable. Opc = PPC::XXLOR; + else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) || + PPC::VSSRCRegClass.contains(DestReg, SrcReg)) + Opc = PPC::XXLORf; + else if (PPC::QFRCRegClass.contains(DestReg, SrcReg)) + Opc = PPC::QVFMR; + else if (PPC::QSRCRegClass.contains(DestReg, SrcReg)) + Opc = PPC::QVFMRs; + else if (PPC::QBRCRegClass.contains(DestReg, SrcReg)) + Opc = PPC::QVFMRb; else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg)) Opc = PPC::CROR; else @@ -805,14 +1181,44 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF, getKillRegState(isKill)), FrameIdx)); NonRI = true; + } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXSDX)) + .addReg(SrcReg, + getKillRegState(isKill)), + FrameIdx)); + NonRI = true; + } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STXSSPX)) + .addReg(SrcReg, + getKillRegState(isKill)), + FrameIdx)); + NonRI = true; } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) { - assert(TM.getSubtargetImpl()->isDarwin() && + assert(Subtarget.isDarwin() && "VRSAVE only needs spill/restore on Darwin"); NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_VRSAVE)) .addReg(SrcReg, getKillRegState(isKill)), FrameIdx)); SpillsVRS = true; + } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFDX)) + .addReg(SrcReg, + getKillRegState(isKill)), + FrameIdx)); + NonRI = true; + } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFSXs)) + .addReg(SrcReg, + getKillRegState(isKill)), + FrameIdx)); + NonRI = true; + } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVSTFDXb)) + .addReg(SrcReg, + getKillRegState(isKill)), + FrameIdx)); + NonRI = true; } else { llvm_unreachable("Unknown regclass!"); } @@ -847,11 +1253,10 @@ PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, MBB.insert(MI, NewMIs[i]); const MachineFrameInfo &MFI = *MF.getFrameInfo(); - MachineMemOperand *MMO = - MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), - MachineMemOperand::MOStore, - MFI.getObjectSize(FrameIdx), - MFI.getObjectAlignment(FrameIdx)); + MachineMemOperand *MMO = MF.getMachineMemOperand( + MachinePointerInfo::getFixedStack(MF, FrameIdx), + MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx), + MFI.getObjectAlignment(FrameIdx)); NewMIs.back()->addMemOperand(MF, MMO); } @@ -896,14 +1301,34 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL, NewMIs.push_back(addFrameReference(BuildMI(MF, 
DL, get(PPC::LXVD2X), DestReg), FrameIdx)); NonRI = true; + } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXSDX), DestReg), + FrameIdx)); + NonRI = true; + } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LXSSPX), DestReg), + FrameIdx)); + NonRI = true; } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) { - assert(TM.getSubtargetImpl()->isDarwin() && + assert(Subtarget.isDarwin() && "VRSAVE only needs spill/restore on Darwin"); NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::RESTORE_VRSAVE), DestReg), FrameIdx)); SpillsVRS = true; + } else if (PPC::QFRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDX), DestReg), + FrameIdx)); + NonRI = true; + } else if (PPC::QSRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFSXs), DestReg), + FrameIdx)); + NonRI = true; + } else if (PPC::QBRCRegClass.hasSubClassEq(RC)) { + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::QVLFDXb), DestReg), + FrameIdx)); + NonRI = true; } else { llvm_unreachable("Unknown regclass!"); } @@ -940,11 +1365,10 @@ PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, MBB.insert(MI, NewMIs[i]); const MachineFrameInfo &MFI = *MF.getFrameInfo(); - MachineMemOperand *MMO = - MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), - MachineMemOperand::MOLoad, - MFI.getObjectSize(FrameIdx), - MFI.getObjectAlignment(FrameIdx)); + MachineMemOperand *MMO = MF.getMachineMemOperand( + MachinePointerInfo::getFixedStack(MF, FrameIdx), + MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx), + MFI.getObjectAlignment(FrameIdx)); NewMIs.back()->addMemOperand(MF, MMO); } @@ -1011,7 +1435,7 @@ bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI, unsigned ZeroReg; if (UseInfo->isLookupPtrRegClass()) { - bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + bool isPPC64 = Subtarget.isPPC64(); ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO; } else { ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ? @@ -1072,13 +1496,12 @@ bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const { return !isPredicated(MI); } -bool PPCInstrInfo::PredicateInstruction( - MachineInstr *MI, - const SmallVectorImpl &Pred) const { +bool PPCInstrInfo::PredicateInstruction(MachineInstr *MI, + ArrayRef Pred) const { unsigned OpC = MI->getOpcode(); - if (OpC == PPC::BLR) { + if (OpC == PPC::BLR || OpC == PPC::BLR8) { if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) { - bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + bool isPPC64 = Subtarget.isPPC64(); MI->setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR) : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR))); @@ -1100,7 +1523,7 @@ bool PPCInstrInfo::PredicateInstruction( return true; } else if (OpC == PPC::B) { if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) { - bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + bool isPPC64 = Subtarget.isPPC64(); MI->setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) : (isPPC64 ? 
PPC::BDZ8 : PPC::BDZ))); @@ -1138,7 +1561,7 @@ bool PPCInstrInfo::PredicateInstruction( llvm_unreachable("Cannot predicate bctr[l] on the ctr register"); bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8; - bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + bool isPPC64 = Subtarget.isPPC64(); if (Pred[0].getImm() == PPC::PRED_BIT_SET) { MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) : @@ -1165,9 +1588,8 @@ bool PPCInstrInfo::PredicateInstruction( return false; } -bool PPCInstrInfo::SubsumesPredicate( - const SmallVectorImpl &Pred1, - const SmallVectorImpl &Pred2) const { +bool PPCInstrInfo::SubsumesPredicate(ArrayRef Pred1, + ArrayRef Pred2) const { assert(Pred1.size() == 2 && "Invalid PPC first predicate"); assert(Pred2.size() == 2 && "Invalid PPC second predicate"); @@ -1240,6 +1662,7 @@ bool PPCInstrInfo::isPredicable(MachineInstr *MI) const { return false; case PPC::B: case PPC::BLR: + case PPC::BLR8: case PPC::BCTR: case PPC::BCTR8: case PPC::BCTRL: @@ -1299,7 +1722,7 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr, // for equality checks (as those don't depend on the sign). On PPC64, // we are restricted to equality for unsigned 64-bit comparisons and for // signed 32-bit comparisons the applicability is more restricted. - bool isPPC64 = TM.getSubtargetImpl()->isPPC64(); + bool isPPC64 = Subtarget.isPPC64(); bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW; bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW; bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD; @@ -1377,10 +1800,10 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr, // There are two possible candidates which can be changed to set CR[01]. // One is MI, the other is a SUB instruction. // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1). - MachineInstr *Sub = NULL; + MachineInstr *Sub = nullptr; if (SrcReg2 != 0) // MI is not a candidate for CMPrr. - MI = NULL; + MI = nullptr; // FIXME: Conservatively refuse to convert an instruction which isn't in the // same BB as the comparison. This is to allow the check below to avoid calls // (and other explicit clobbers); instead we should really check for these @@ -1558,308 +1981,46 @@ unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { const MachineFunction *MF = MI->getParent()->getParent(); const char *AsmStr = MI->getOperand(0).getSymbolName(); return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo()); + } else if (Opcode == TargetOpcode::STACKMAP) { + return MI->getOperand(1).getImm(); + } else if (Opcode == TargetOpcode::PATCHPOINT) { + PatchPointOpers Opers(MI); + return Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm(); } else { const MCInstrDesc &Desc = get(Opcode); return Desc.getSize(); } } - -#undef DEBUG_TYPE -#define DEBUG_TYPE "ppc-vsx-copy" - -namespace llvm { - void initializePPCVSXCopyPass(PassRegistry&); +std::pair +PPCInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { + const unsigned Mask = PPCII::MO_ACCESS_MASK; + return std::make_pair(TF & Mask, TF & ~Mask); } -namespace { - // PPCVSXCopy pass - For copies between VSX registers and non-VSX registers - // (Altivec and scalar floating-point registers), we need to transform the - // copies into subregister copies with other restrictions. 
- struct PPCVSXCopy : public MachineFunctionPass { - static char ID; - PPCVSXCopy() : MachineFunctionPass(ID) { - initializePPCVSXCopyPass(*PassRegistry::getPassRegistry()); - } - - const PPCTargetMachine *TM; - const PPCInstrInfo *TII; - - bool IsRegInClass(unsigned Reg, const TargetRegisterClass *RC, - MachineRegisterInfo &MRI) { - if (TargetRegisterInfo::isVirtualRegister(Reg)) { - return RC->hasSubClassEq(MRI.getRegClass(Reg)); - } else if (RC->contains(Reg)) { - return true; - } - - return false; - } - - bool IsVSReg(unsigned Reg, MachineRegisterInfo &MRI) { - return IsRegInClass(Reg, &PPC::VSRCRegClass, MRI); - } - - bool IsVRReg(unsigned Reg, MachineRegisterInfo &MRI) { - return IsRegInClass(Reg, &PPC::VRRCRegClass, MRI); - } - - bool IsF8Reg(unsigned Reg, MachineRegisterInfo &MRI) { - return IsRegInClass(Reg, &PPC::F8RCRegClass, MRI); - } - -protected: - bool processBlock(MachineBasicBlock &MBB) { - bool Changed = false; - - MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); - for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end(); - I != IE; ++I) { - MachineInstr *MI = I; - if (!MI->isFullCopy()) - continue; - - MachineOperand &DstMO = MI->getOperand(0); - MachineOperand &SrcMO = MI->getOperand(1); - - if ( IsVSReg(DstMO.getReg(), MRI) && - !IsVSReg(SrcMO.getReg(), MRI)) { - // This is a copy *to* a VSX register from a non-VSX register. - Changed = true; - - const TargetRegisterClass *SrcRC = - IsVRReg(SrcMO.getReg(), MRI) ? &PPC::VSHRCRegClass : - &PPC::VSLRCRegClass; - assert((IsF8Reg(SrcMO.getReg(), MRI) || - IsVRReg(SrcMO.getReg(), MRI)) && - "Unknown source for a VSX copy"); - - unsigned NewVReg = MRI.createVirtualRegister(SrcRC); - BuildMI(MBB, MI, MI->getDebugLoc(), - TII->get(TargetOpcode::SUBREG_TO_REG), NewVReg) - .addImm(1) // add 1, not 0, because there is no implicit clearing - // of the high bits. - .addOperand(SrcMO) - .addImm(IsVRReg(SrcMO.getReg(), MRI) ? PPC::sub_128 : - PPC::sub_64); - - // The source of the original copy is now the new virtual register. - SrcMO.setReg(NewVReg); - } else if (!IsVSReg(DstMO.getReg(), MRI) && - IsVSReg(SrcMO.getReg(), MRI)) { - // This is a copy *from* a VSX register to a non-VSX register. - Changed = true; - - const TargetRegisterClass *DstRC = - IsVRReg(DstMO.getReg(), MRI) ? &PPC::VSHRCRegClass : - &PPC::VSLRCRegClass; - assert((IsF8Reg(DstMO.getReg(), MRI) || - IsVRReg(DstMO.getReg(), MRI)) && - "Unknown destination for a VSX copy"); - - // Copy the VSX value into a new VSX register of the correct subclass. - unsigned NewVReg = MRI.createVirtualRegister(DstRC); - BuildMI(MBB, MI, MI->getDebugLoc(), - TII->get(TargetOpcode::COPY), NewVReg) - .addOperand(SrcMO); - - // Transform the original copy into a subregister extraction copy. - SrcMO.setReg(NewVReg); - SrcMO.setSubReg(IsVRReg(DstMO.getReg(), MRI) ? 
PPC::sub_128 : - PPC::sub_64); - } - } - - return Changed; - } - -public: - virtual bool runOnMachineFunction(MachineFunction &MF) { - TM = static_cast(&MF.getTarget()); - TII = TM->getInstrInfo(); - - bool Changed = false; - - for (MachineFunction::iterator I = MF.begin(); I != MF.end();) { - MachineBasicBlock &B = *I++; - if (processBlock(B)) - Changed = true; - } - - return Changed; - } - - virtual void getAnalysisUsage(AnalysisUsage &AU) const { - MachineFunctionPass::getAnalysisUsage(AU); - } - }; +ArrayRef> +PPCInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { + using namespace PPCII; + static const std::pair TargetFlags[] = { + {MO_LO, "ppc-lo"}, + {MO_HA, "ppc-ha"}, + {MO_TPREL_LO, "ppc-tprel-lo"}, + {MO_TPREL_HA, "ppc-tprel-ha"}, + {MO_DTPREL_LO, "ppc-dtprel-lo"}, + {MO_TLSLD_LO, "ppc-tlsld-lo"}, + {MO_TOC_LO, "ppc-toc-lo"}, + {MO_TLS, "ppc-tls"}}; + return makeArrayRef(TargetFlags); } -INITIALIZE_PASS(PPCVSXCopy, DEBUG_TYPE, - "PowerPC VSX Copy Legalization", false, false) - -char PPCVSXCopy::ID = 0; -FunctionPass* -llvm::createPPCVSXCopyPass() { return new PPCVSXCopy(); } - -#undef DEBUG_TYPE -#define DEBUG_TYPE "ppc-early-ret" -STATISTIC(NumBCLR, "Number of early conditional returns"); -STATISTIC(NumBLR, "Number of early returns"); - -namespace llvm { - void initializePPCEarlyReturnPass(PassRegistry&); +ArrayRef> +PPCInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const { + using namespace PPCII; + static const std::pair TargetFlags[] = { + {MO_PLT_OR_STUB, "ppc-plt-or-stub"}, + {MO_PIC_FLAG, "ppc-pic"}, + {MO_NLP_FLAG, "ppc-nlp"}, + {MO_NLP_HIDDEN_FLAG, "ppc-nlp-hidden"}}; + return makeArrayRef(TargetFlags); } -namespace { - // PPCEarlyReturn pass - For simple functions without epilogue code, move - // returns up, and create conditional returns, to avoid unnecessary - // branch-to-blr sequences. - struct PPCEarlyReturn : public MachineFunctionPass { - static char ID; - PPCEarlyReturn() : MachineFunctionPass(ID) { - initializePPCEarlyReturnPass(*PassRegistry::getPassRegistry()); - } - - const PPCTargetMachine *TM; - const PPCInstrInfo *TII; - -protected: - bool processBlock(MachineBasicBlock &ReturnMBB) { - bool Changed = false; - - MachineBasicBlock::iterator I = ReturnMBB.begin(); - I = ReturnMBB.SkipPHIsAndLabels(I); - - // The block must be essentially empty except for the blr. - if (I == ReturnMBB.end() || I->getOpcode() != PPC::BLR || - I != ReturnMBB.getLastNonDebugInstr()) - return Changed; - - SmallVector PredToRemove; - for (MachineBasicBlock::pred_iterator PI = ReturnMBB.pred_begin(), - PIE = ReturnMBB.pred_end(); PI != PIE; ++PI) { - bool OtherReference = false, BlockChanged = false; - for (MachineBasicBlock::iterator J = (*PI)->getLastNonDebugInstr();;) { - if (J->getOpcode() == PPC::B) { - if (J->getOperand(0).getMBB() == &ReturnMBB) { - // This is an unconditional branch to the return. Replace the - // branch with a blr. - BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BLR)); - MachineBasicBlock::iterator K = J--; - K->eraseFromParent(); - BlockChanged = true; - ++NumBLR; - continue; - } - } else if (J->getOpcode() == PPC::BCC) { - if (J->getOperand(2).getMBB() == &ReturnMBB) { - // This is a conditional branch to the return. Replace the branch - // with a bclr. 
- BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCCLR)) - .addImm(J->getOperand(0).getImm()) - .addReg(J->getOperand(1).getReg()); - MachineBasicBlock::iterator K = J--; - K->eraseFromParent(); - BlockChanged = true; - ++NumBCLR; - continue; - } - } else if (J->getOpcode() == PPC::BC || J->getOpcode() == PPC::BCn) { - if (J->getOperand(1).getMBB() == &ReturnMBB) { - // This is a conditional branch to the return. Replace the branch - // with a bclr. - BuildMI(**PI, J, J->getDebugLoc(), - TII->get(J->getOpcode() == PPC::BC ? - PPC::BCLR : PPC::BCLRn)) - .addReg(J->getOperand(0).getReg()); - MachineBasicBlock::iterator K = J--; - K->eraseFromParent(); - BlockChanged = true; - ++NumBCLR; - continue; - } - } else if (J->isBranch()) { - if (J->isIndirectBranch()) { - if (ReturnMBB.hasAddressTaken()) - OtherReference = true; - } else - for (unsigned i = 0; i < J->getNumOperands(); ++i) - if (J->getOperand(i).isMBB() && - J->getOperand(i).getMBB() == &ReturnMBB) - OtherReference = true; - } else if (!J->isTerminator() && !J->isDebugValue()) - break; - - if (J == (*PI)->begin()) - break; - - --J; - } - - if ((*PI)->canFallThrough() && (*PI)->isLayoutSuccessor(&ReturnMBB)) - OtherReference = true; - - // Predecessors are stored in a vector and can't be removed here. - if (!OtherReference && BlockChanged) { - PredToRemove.push_back(*PI); - } - - if (BlockChanged) - Changed = true; - } - - for (unsigned i = 0, ie = PredToRemove.size(); i != ie; ++i) - PredToRemove[i]->removeSuccessor(&ReturnMBB); - - if (Changed && !ReturnMBB.hasAddressTaken()) { - // We now might be able to merge this blr-only block into its - // by-layout predecessor. - if (ReturnMBB.pred_size() == 1 && - (*ReturnMBB.pred_begin())->isLayoutSuccessor(&ReturnMBB)) { - // Move the blr into the preceding block. - MachineBasicBlock &PrevMBB = **ReturnMBB.pred_begin(); - PrevMBB.splice(PrevMBB.end(), &ReturnMBB, I); - PrevMBB.removeSuccessor(&ReturnMBB); - } - - if (ReturnMBB.pred_empty()) - ReturnMBB.eraseFromParent(); - } - - return Changed; - } - -public: - virtual bool runOnMachineFunction(MachineFunction &MF) { - TM = static_cast(&MF.getTarget()); - TII = TM->getInstrInfo(); - - bool Changed = false; - - // If the function does not have at least two blocks, then there is - // nothing to do. - if (MF.size() < 2) - return Changed; - - for (MachineFunction::iterator I = MF.begin(); I != MF.end();) { - MachineBasicBlock &B = *I++; - if (processBlock(B)) - Changed = true; - } - - return Changed; - } - - virtual void getAnalysisUsage(AnalysisUsage &AU) const { - MachineFunctionPass::getAnalysisUsage(AU); - } - }; -} - -INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE, - "PowerPC Early-Return Creation", false, false) - -char PPCEarlyReturn::ID = 0; -FunctionPass* -llvm::createPPCEarlyReturnPass() { return new PPCEarlyReturn(); }