X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FTargetInstrInfo.cpp;h=97ca0253d3763199a3490479ac9ad52247b4b34f;hb=e235dc4936fce732299d07dda2d840a3cc0e87c2;hp=24141afd5ccc6d6d9c0f8ad929918f6e97d4bfa0;hpb=0d1521330705827822808f6f321b9cd371594569;p=oota-llvm.git

diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index 24141afd5cc..97ca0253d37 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -19,12 +19,14 @@
 #include "llvm/CodeGen/PseudoSourceValue.h"
 #include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
 #include "llvm/CodeGen/StackMaps.h"
+#include "llvm/CodeGen/TargetSchedule.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/MC/MCAsmInfo.h"
 #include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -141,6 +143,10 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
   unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
   bool Reg1IsKill = MI->getOperand(Idx1).isKill();
   bool Reg2IsKill = MI->getOperand(Idx2).isKill();
+  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
+  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
+  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
+  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
   // If destination is tied to either of the commuted source register, then
   // it must be updated.
   if (HasDef && Reg0 == Reg1 &&
@@ -171,6 +177,10 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
   MI->getOperand(Idx1).setSubReg(SubReg2);
   MI->getOperand(Idx2).setIsKill(Reg1IsKill);
   MI->getOperand(Idx1).setIsKill(Reg2IsKill);
+  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
+  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
+  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
+  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
   return MI;
 }
 
@@ -210,9 +220,8 @@ TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
   return !isPredicated(MI);
 }
 
-
-bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
-                            const SmallVectorImpl<MachineOperand> &Pred) const {
+bool TargetInstrInfo::PredicateInstruction(
+    MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
   bool MadeChange = false;
 
   assert(!MI->isBundle() &&
@@ -284,21 +293,20 @@ bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
 bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                         unsigned SubIdx, unsigned &Size,
                                         unsigned &Offset,
-                                        const TargetMachine *TM) const {
+                                        const MachineFunction &MF) const {
   if (!SubIdx) {
     Size = RC->getSize();
     Offset = 0;
     return true;
   }
-  unsigned BitSize =
-      TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIdxSize(SubIdx);
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
   // Convert bit size to byte size to be consistent with
   // MCRegisterClass::getSize().
   if (BitSize % 8)
     return false;
 
-  int BitOffset =
-      TM->getSubtargetImpl()->getRegisterInfo()->getSubRegIdxOffset(SubIdx);
+  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
   if (BitOffset < 0 || BitOffset % 8)
     return false;
 
@@ -307,7 +315,7 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
 
   assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
 
-  if (!TM->getSubtargetImpl()->getDataLayout()->isLittleEndian()) {
+  if (!MF.getTarget().getDataLayout()->isLittleEndian()) {
     Offset = RC->getSize() - (Offset + Size);
   }
   return true;
@@ -372,16 +380,17 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
   return nullptr;
 }
 
-bool TargetInstrInfo::
-canFoldMemoryOperand(const MachineInstr *MI,
-                     const SmallVectorImpl<unsigned> &Ops) const {
+void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
+  llvm_unreachable("Not a MachO target");
+}
+
+bool TargetInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+                                           ArrayRef<unsigned> Ops) const {
   return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
 }
 
-static MachineInstr* foldPatchpoint(MachineFunction &MF,
-                                    MachineInstr *MI,
-                                    const SmallVectorImpl<unsigned> &Ops,
-                                    int FrameIndex,
+static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
+                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                     const TargetInstrInfo &TII) {
   unsigned StartIdx = 0;
   switch (MI->getOpcode()) {
@@ -400,9 +409,8 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
 
   // Return false if any operands requested for folding are not foldable (not
   // part of the stackmap's live values).
-  for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
-       I != E; ++I) {
-    if (*I < StartIdx)
+  for (unsigned Op : Ops) {
+    if (Op < StartIdx)
       return nullptr;
   }
 
@@ -422,8 +430,8 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
       // Compute the spill slot size and offset.
       const TargetRegisterClass *RC =
         MF.getRegInfo().getRegClass(MO.getReg());
-      bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
-                                         SpillOffset, &MF.getTarget());
+      bool Valid =
+          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
       if (!Valid)
         report_fatal_error("cannot spill patchpoint subregister operand");
       MIB.addImm(StackMaps::IndirectMemRefOp);
@@ -443,10 +451,9 @@ static MachineInstr* foldPatchpoint(MachineFunction &MF,
 /// operand folded, otherwise NULL is returned. The client is responsible for
 /// removing the old instruction and adding the new one in the instruction
 /// stream.
-MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                   const SmallVectorImpl<unsigned> &Ops,
-                                   int FI) const {
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                                 ArrayRef<unsigned> Ops,
+                                                 int FI) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     if (MI->getOperand(Ops[i]).isDef())
@@ -464,11 +471,13 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
       MI->getOpcode() == TargetOpcode::PATCHPOINT) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
+    if (NewMI)
+      MBB->insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI =foldMemoryOperandImpl(MF, MI, Ops, FI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
   }
-
+
   if (NewMI) {
     NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
     // Add a memory operand, foldMemoryOperandImpl doesn't do that.
@@ -486,8 +495,7 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                             MFI.getObjectAlignment(FI));
     NewMI->addMemOperand(MF, MMO);
 
-    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
-    return MBB->insert(MI, NewMI);
+    return NewMI;
   }
 
   // Straight COPY may fold as load/store.
@@ -512,10 +520,9 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
 /// foldMemoryOperand - Same as the previous version except it allows folding
 /// of any load and store from / to any address, not just from a specific
 /// stack slot.
-MachineInstr*
-TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                   const SmallVectorImpl<unsigned> &Ops,
-                                   MachineInstr* LoadMI) const {
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+                                                 ArrayRef<unsigned> Ops,
+                                                 MachineInstr *LoadMI) const {
   assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -533,15 +540,15 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
       isLoadFromStackSlot(LoadMI, FrameIndex)) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+    if (NewMI)
+      NewMI = MBB.insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
   }
 
   if (!NewMI) return nullptr;
 
-  NewMI = MBB.insert(MI, NewMI);
-
   // Copy the memoperands from the load to the folded instruction.
   if (MI->memoperands_empty()) {
     NewMI->setMemRefs(LoadMI->memoperands_begin(),
@@ -640,6 +647,28 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
   return true;
 }
 
+int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
+  const MachineFunction *MF = MI->getParent()->getParent();
+  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
+  bool StackGrowsDown =
+      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
+
+  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
+  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
+
+  if (MI->getOpcode() != FrameSetupOpcode &&
+      MI->getOpcode() != FrameDestroyOpcode)
+    return 0;
+
+  int SPAdj = MI->getOperand(0).getImm();
+
+  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
+      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
+    SPAdj = -SPAdj;
+
+  return SPAdj;
+}
+
 /// isSchedulingBoundary - Test if the given instruction should be
 /// considered a scheduling boundary. This primarily includes labels
 /// and terminators.
@@ -746,14 +775,14 @@ TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
 }
 
 /// Return the default expected latency for a def based on it's opcode.
-unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
+unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                             const MachineInstr *DefMI) const {
   if (DefMI->isTransient())
     return 0;
   if (DefMI->mayLoad())
-    return SchedModel->LoadLatency;
+    return SchedModel.LoadLatency;
   if (isHighLatencyDef(DefMI->getOpcode()))
-    return SchedModel->HighLatency;
+    return SchedModel.HighLatency;
   return 1;
 }
 
@@ -773,9 +802,10 @@ getInstrLatency(const InstrItineraryData *ItinData,
   return ItinData->getStageLatency(MI->getDesc().getSchedClass());
 }
 
-bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
+bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                        const MachineInstr *DefMI,
                                        unsigned DefIdx) const {
+  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
   if (!ItinData || ItinData->isEmpty())
     return false;