X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FCodeGenPrepare.cpp;h=42209b8cbe2d5be2410a97408a358ed2d06a2a8d;hb=98b532cf33a3198391f2f411879059f6c7bf99a0;hp=9bf39911b6c697f80a2059766b225f506fd9f2a8;hpb=9918fb5631974f2201a640384b7ebe672c749e43;p=oota-llvm.git

diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 9bf39911b6c..42209b8cbe2 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -20,34 +20,50 @@
 #include "llvm/Function.h"
 #include "llvm/InlineAsm.h"
 #include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Analysis/ProfileInfo.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Utils/AddrModeMatcher.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/Assembly/Writer.h"
 #include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/PatternMatch.h"
+#include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 using namespace llvm::PatternMatch;
 
+static cl::opt<bool> FactorCommonPreds("split-critical-paths-tweak",
+                                       cl::init(false), cl::Hidden);
+
 namespace {
-  class VISIBILITY_HIDDEN CodeGenPrepare : public FunctionPass {
+  class CodeGenPrepare : public FunctionPass {
     /// TLI - Keep a pointer of a TargetLowering to consult for determining
     /// transformation profitability.
     const TargetLowering *TLI;
+    ProfileInfo *PI;
+
+    /// BackEdges - Keep a set of all the loop back edges.
+    ///
+    SmallSet<std::pair<const BasicBlock*, const BasicBlock*>, 8> BackEdges;
   public:
     static char ID; // Pass identification, replacement for typeid
     explicit CodeGenPrepare(const TargetLowering *tli = 0)
       : FunctionPass(&ID), TLI(tli) {}
     bool runOnFunction(Function &F);
 
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.addPreserved<ProfileInfo>();
+    }
+
   private:
     bool EliminateMostlyEmptyBlocks(Function &F);
     bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
@@ -57,7 +73,9 @@ namespace {
                             DenseMap<Value*,Value*> &SunkAddrs);
     bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                                DenseMap<Value*,Value*> &SunkAddrs);
+    bool MoveExtToFormExtLoad(Instruction *I);
     bool OptimizeExtUses(Instruction *I);
+    void findLoopBackEdges(const Function &F);
   };
 }
 
@@ -69,14 +87,27 @@ FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
   return new CodeGenPrepare(TLI);
 }
 
+/// findLoopBackEdges - Do a DFS walk to find loop back edges.
+///
+void CodeGenPrepare::findLoopBackEdges(const Function &F) {
+  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
+  FindFunctionBackedges(F, Edges);
+
+  BackEdges.insert(Edges.begin(), Edges.end());
+}
+
 bool CodeGenPrepare::runOnFunction(Function &F) {
   bool EverMadeChange = false;
 
+  PI = getAnalysisIfAvailable<ProfileInfo>();
+
   // First pass, eliminate blocks that contain only PHI nodes and an
   // unconditional branch.
   EverMadeChange |= EliminateMostlyEmptyBlocks(F);
 
+  // Now find loop back edges.
+  findLoopBackEdges(F);
+
   bool MadeChange = true;
   while (MadeChange) {
     MadeChange = false;
@@ -87,10 +118,11 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
   return EverMadeChange;
 }
 
-/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes
-/// and an unconditional branch.  Passes before isel (e.g. LSR/loopsimplify)
-/// often split edges in ways that are non-optimal for isel.  Start by
-/// eliminating these blocks so we can split them the way we want them.
+/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
+/// debug info directives, and an unconditional branch.  Passes before isel
+/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
+/// isel.  Start by eliminating these blocks so we can split them the way we
+/// want them.
 bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
   bool MadeChange = false;
   // Note that this intentionally skips the entry block.
@@ -102,12 +134,18 @@ bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
     if (!BI || !BI->isUnconditional())
       continue;
 
-    // If the instruction before the branch isn't a phi node, then other stuff
-    // is happening here.
+    // If the instruction before the branch (skipping debug info) isn't a phi
+    // node, then other stuff is happening here.
     BasicBlock::iterator BBI = BI;
     if (BBI != BB->begin()) {
       --BBI;
-      if (!isa<PHINode>(BBI)) continue;
+      while (isa<DbgInfoIntrinsic>(BBI)) {
+        if (BBI == BB->begin())
+          break;
+        --BBI;
+      }
+      if (!isa<PHINode>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
+        continue;
     }
 
     // Do not break infinite loops.
@@ -200,21 +238,23 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
   BranchInst *BI = cast<BranchInst>(BB->getTerminator());
   BasicBlock *DestBB = BI->getSuccessor(0);
 
-  DOUT << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB;
+  DEBUG(errs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
 
   // If the destination block has a single pred, then this is a trivial edge,
   // just collapse it.
   if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
-    // Remember if SinglePred was the entry block of the function.  If so, we
-    // will need to move BB back to the entry position.
-    bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
-    MergeBasicBlockIntoOnlyPred(DestBB);
-
-    if (isEntry && BB != &BB->getParent()->getEntryBlock())
-      BB->moveBefore(&BB->getParent()->getEntryBlock());
-
-    DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
-    return;
+    if (SinglePred != DestBB) {
+      // Remember if SinglePred was the entry block of the function.  If so, we
+      // will need to move BB back to the entry position.
+      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
+      MergeBasicBlockIntoOnlyPred(DestBB, this);
+
+      if (isEntry && BB != &BB->getParent()->getEntryBlock())
+        BB->moveBefore(&BB->getParent()->getEntryBlock());
+
+      DEBUG(errs() << "AFTER:\n" << *DestBB << "\n\n\n");
+      return;
+    }
   }
 
   // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
@@ -249,9 +289,13 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
 
   // The PHIs are now updated, change everything that refers to BB to use
   // DestBB and remove BB.
BB->replaceAllUsesWith(DestBB); + if (PI) { + PI->replaceAllUses(BB, DestBB); + PI->removeEdge(ProfileInfo::getEdge(BB, DestBB)); + } BB->eraseFromParent(); - DOUT << "AFTER:\n" << *DestBB << "\n\n\n"; + DEBUG(errs() << "AFTER:\n" << *DestBB << "\n\n\n"); } @@ -260,75 +304,127 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) { /// phi nodes (otherwise critical edges are ok). If there is already another /// predecessor of the succ that is empty (and thus has no phi nodes), use it /// instead of introducing a new block. -static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) { +static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, + SmallSet, 8> &BackEdges, + Pass *P) { BasicBlock *TIBB = TI->getParent(); BasicBlock *Dest = TI->getSuccessor(SuccNum); assert(isa(Dest->begin()) && "This should only be called if Dest has a PHI!"); + // Do not split edges to EH landing pads. + if (InvokeInst *Invoke = dyn_cast(TI)) { + if (Invoke->getSuccessor(1) == Dest) + return; + } + // As a hack, never split backedges of loops. Even though the copy for any // PHIs inserted on the backedge would be dead for exits from the loop, we // assume that the cost of *splitting* the backedge would be too high. - if (Dest == TIBB) + if (BackEdges.count(std::make_pair(TIBB, Dest))) return; - /// TIPHIValues - This array is lazily computed to determine the values of - /// PHIs in Dest that TI would provide. - SmallVector TIPHIValues; + if (!FactorCommonPreds) { + /// TIPHIValues - This array is lazily computed to determine the values of + /// PHIs in Dest that TI would provide. + SmallVector TIPHIValues; + + // Check to see if Dest has any blocks that can be used as a split edge for + // this terminator. + for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) { + BasicBlock *Pred = *PI; + // To be usable, the pred has to end with an uncond branch to the dest. + BranchInst *PredBr = dyn_cast(Pred->getTerminator()); + if (!PredBr || !PredBr->isUnconditional()) + continue; + // Must be empty other than the branch and debug info. + BasicBlock::iterator I = Pred->begin(); + while (isa(I)) + I++; + if (dyn_cast(I) != PredBr) + continue; + // Cannot be the entry block; its label does not get emitted. + if (Pred == &(Dest->getParent()->getEntryBlock())) + continue; - // Check to see if Dest has any blocks that can be used as a split edge for - // this terminator. - for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) { - BasicBlock *Pred = *PI; - // To be usable, the pred has to end with an uncond branch to the dest. - BranchInst *PredBr = dyn_cast(Pred->getTerminator()); - if (!PredBr || !PredBr->isUnconditional() || - // Must be empty other than the branch. - &Pred->front() != PredBr || - // Cannot be the entry block; its label does not get emitted. - Pred == &(Dest->getParent()->getEntryBlock())) - continue; + // Finally, since we know that Dest has phi nodes in it, we have to make + // sure that jumping to Pred will have the same effect as going to Dest in + // terms of PHI values. + PHINode *PN; + unsigned PHINo = 0; + bool FoundMatch = true; + for (BasicBlock::iterator I = Dest->begin(); + (PN = dyn_cast(I)); ++I, ++PHINo) { + if (PHINo == TIPHIValues.size()) + TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB)); + + // If the PHI entry doesn't work, we can't use this pred. 
+ if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) { + FoundMatch = false; + break; + } + } - // Finally, since we know that Dest has phi nodes in it, we have to make - // sure that jumping to Pred will have the same affect as going to Dest in - // terms of PHI values. - PHINode *PN; - unsigned PHINo = 0; - bool FoundMatch = true; - for (BasicBlock::iterator I = Dest->begin(); - (PN = dyn_cast(I)); ++I, ++PHINo) { - if (PHINo == TIPHIValues.size()) - TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB)); - - // If the PHI entry doesn't work, we can't use this pred. - if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) { - FoundMatch = false; - break; + // If we found a workable predecessor, change TI to branch to Succ. + if (FoundMatch) { + ProfileInfo *PI = P->getAnalysisIfAvailable(); + if (PI) + PI->splitEdge(TIBB, Dest, Pred); + Dest->removePredecessor(TIBB); + TI->setSuccessor(SuccNum, Pred); + return; } } - // If we found a workable predecessor, change TI to branch to Succ. - if (FoundMatch) { - Dest->removePredecessor(TIBB); - TI->setSuccessor(SuccNum, Pred); - return; + SplitCriticalEdge(TI, SuccNum, P, true); + return; + } + + PHINode *PN; + SmallVector TIPHIValues; + for (BasicBlock::iterator I = Dest->begin(); + (PN = dyn_cast(I)); ++I) + TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB)); + + SmallVector IdenticalPreds; + for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) { + BasicBlock *Pred = *PI; + if (BackEdges.count(std::make_pair(Pred, Dest))) + continue; + if (PI == TIBB) + IdenticalPreds.push_back(Pred); + else { + bool Identical = true; + unsigned PHINo = 0; + for (BasicBlock::iterator I = Dest->begin(); + (PN = dyn_cast(I)); ++I, ++PHINo) + if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) { + Identical = false; + break; + } + if (Identical) + IdenticalPreds.push_back(Pred); } } - SplitCriticalEdge(TI, SuccNum, P, true); + assert(!IdenticalPreds.empty()); + SplitBlockPredecessors(Dest, &IdenticalPreds[0], IdenticalPreds.size(), + ".critedge", P); } + /// OptimizeNoopCopyExpression - If the specified cast instruction is a noop -/// copy (e.g. it's casting from one pointer type to another, int->uint, or -/// int->sbyte on PPC), sink it into user blocks to reduce the number of virtual +/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC), +/// sink it into user blocks to reduce the number of virtual /// registers that must be created and coalesced. /// /// Return true if any changes are made. /// static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){ // If this is a noop copy, - MVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType()); - MVT DstVT = TLI.getValueType(CI->getType()); + EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType()); + EVT DstVT = TLI.getValueType(CI->getType()); // This is an fp<->int conversion? if (SrcVT.isInteger() != DstVT.isInteger()) @@ -341,10 +437,10 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){ // If these values will be promoted, find out what they will be promoted // to. This helps us consider truncates on PPC as noop copies when they // are. 
- if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote) - SrcVT = TLI.getTypeToTransformTo(SrcVT); - if (TLI.getTypeAction(DstVT) == TargetLowering::Promote) - DstVT = TLI.getTypeToTransformTo(DstVT); + if (TLI.getTypeAction(CI->getContext(), SrcVT) == TargetLowering::Promote) + SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); + if (TLI.getTypeAction(CI->getContext(), DstVT) == TargetLowering::Promote) + DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); // If, after promotion, these are the same types, this is a noop copy. if (SrcVT != DstVT) @@ -365,8 +461,7 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){ // appropriate predecessor block. BasicBlock *UserBB = User->getParent(); if (PHINode *PN = dyn_cast(User)) { - unsigned OpVal = UI.getOperandNo()/2; - UserBB = PN->getIncomingBlock(OpVal); + UserBB = PN->getIncomingBlock(UI); } // Preincrement use iterator so we don't invalidate it. @@ -438,7 +533,8 @@ static bool OptimizeCmpExpression(CmpInst *CI) { BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI(); InsertedCmp = - CmpInst::Create(CI->getOpcode(), CI->getPredicate(), CI->getOperand(0), + CmpInst::Create(CI->getOpcode(), + CI->getPredicate(), CI->getOperand(0), CI->getOperand(1), "", InsertPt); MadeChange = true; } @@ -454,663 +550,6 @@ static bool OptimizeCmpExpression(CmpInst *CI) { return MadeChange; } -/// EraseDeadInstructions - Erase any dead instructions, recursively. -static void EraseDeadInstructions(Value *V) { - Instruction *I = dyn_cast(V); - if (!I || !I->use_empty()) return; - - SmallPtrSet Insts; - Insts.insert(I); - - while (!Insts.empty()) { - I = *Insts.begin(); - Insts.erase(I); - if (isInstructionTriviallyDead(I)) { - for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) - if (Instruction *U = dyn_cast(I->getOperand(i))) - Insts.insert(U); - I->eraseFromParent(); - } - } -} - -//===----------------------------------------------------------------------===// -// Addressing Mode Analysis and Optimization -//===----------------------------------------------------------------------===// - -namespace { - /// ExtAddrMode - This is an extended version of TargetLowering::AddrMode - /// which holds actual Value*'s for register values. - struct ExtAddrMode : public TargetLowering::AddrMode { - Value *BaseReg; - Value *ScaledReg; - ExtAddrMode() : BaseReg(0), ScaledReg(0) {} - void print(OStream &OS) const; - void dump() const { - print(cerr); - cerr << '\n'; - } - }; -} // end anonymous namespace - -static inline OStream &operator<<(OStream &OS, const ExtAddrMode &AM) { - AM.print(OS); - return OS; -} - -void ExtAddrMode::print(OStream &OS) const { - bool NeedPlus = false; - OS << "["; - if (BaseGV) - OS << (NeedPlus ? " + " : "") - << "GV:%" << BaseGV->getName(), NeedPlus = true; - - if (BaseOffs) - OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true; - - if (BaseReg) - OS << (NeedPlus ? " + " : "") - << "Base:%" << BaseReg->getName(), NeedPlus = true; - if (Scale) - OS << (NeedPlus ? " + " : "") - << Scale << "*%" << ScaledReg->getName(), NeedPlus = true; - - OS << ']'; -} - -namespace { -/// AddressingModeMatcher - This class exposes a single public method, which is -/// used to construct a "maximal munch" of the addressing mode for the target -/// specified by TLI for an access to "V" with an access type of AccessTy. This -/// returns the addressing mode that is actually matched by value, but also -/// returns the list of instructions involved in that addressing computation in -/// AddrModeInsts. 
-class AddressingModeMatcher { - SmallVectorImpl &AddrModeInsts; - const TargetLowering &TLI; - - /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and - /// the memory instruction that we're computing this address for. - const Type *AccessTy; - Instruction *MemoryInst; - - /// AddrMode - This is the addressing mode that we're building up. This is - /// part of the return value of this addressing mode matching stuff. - ExtAddrMode &AddrMode; - - /// IgnoreProfitability - This is set to true when we should not do - /// profitability checks. When true, IsProfitableToFoldIntoAddressingMode - /// always returns true. - bool IgnoreProfitability; - - AddressingModeMatcher(SmallVectorImpl &AMI, - const TargetLowering &T, const Type *AT, - Instruction *MI, ExtAddrMode &AM) - : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM) { - IgnoreProfitability = false; - } -public: - - /// Match - Find the maximal addressing mode that a load/store of V can fold, - /// give an access type of AccessTy. This returns a list of involved - /// instructions in AddrModeInsts. - static ExtAddrMode Match(Value *V, const Type *AccessTy, - Instruction *MemoryInst, - SmallVectorImpl &AddrModeInsts, - const TargetLowering &TLI) { - ExtAddrMode Result; - - bool Success = - AddressingModeMatcher(AddrModeInsts, TLI, AccessTy, - MemoryInst, Result).MatchAddr(V, 0); - Success = Success; assert(Success && "Couldn't select *anything*?"); - return Result; - } -private: - bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); - bool MatchAddr(Value *V, unsigned Depth); - bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth); - bool IsProfitableToFoldIntoAddressingMode(Instruction *I, - ExtAddrMode &AMBefore, - ExtAddrMode &AMAfter); - bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); -}; -} // end anonymous namespace - -/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode. -/// Return true and update AddrMode if this addr mode is legal for the target, -/// false if not. -bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale, - unsigned Depth) { - // If Scale is 1, then this is the same as adding ScaleReg to the addressing - // mode. Just process that directly. - if (Scale == 1) - return MatchAddr(ScaleReg, Depth); - - // If the scale is 0, it takes nothing to add this. - if (Scale == 0) - return true; - - // If we already have a scale of this value, we can add to it, otherwise, we - // need an available scale field. - if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) - return false; - - ExtAddrMode TestAddrMode = AddrMode; - - // Add scale to turn X*4+X*3 -> X*7. This could also do things like - // [A+B + A*7] -> [B+A*8]. - TestAddrMode.Scale += Scale; - TestAddrMode.ScaledReg = ScaleReg; - - // If the new address isn't legal, bail out. - if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) - return false; - - // It was legal, so commit it. - AddrMode = TestAddrMode; - - // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now - // to see if ScaleReg is actually X+C. If so, we can turn this into adding - // X*Scale + C*Scale to addr mode. - ConstantInt *CI; Value *AddLHS; - if (match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { - TestAddrMode.ScaledReg = AddLHS; - TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; - - // If this addressing mode is legal, commit it and remember that we folded - // this instruction. 
- if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) { - AddrModeInsts.push_back(cast(ScaleReg)); - AddrMode = TestAddrMode; - return true; - } - } - - // Otherwise, not (x+c)*scale, just return what we have. - return true; -} - -/// MightBeFoldableInst - This is a little filter, which returns true if an -/// addressing computation involving I might be folded into a load/store -/// accessing it. This doesn't need to be perfect, but needs to accept at least -/// the set of instructions that MatchOperationAddr can. -static bool MightBeFoldableInst(Instruction *I) { - switch (I->getOpcode()) { - case Instruction::BitCast: - // Don't touch identity bitcasts. - if (I->getType() == I->getOperand(0)->getType()) - return false; - return isa(I->getType()) || isa(I->getType()); - case Instruction::PtrToInt: - // PtrToInt is always a noop, as we know that the int type is pointer sized. - return true; - case Instruction::IntToPtr: - // We know the input is intptr_t, so this is foldable. - return true; - case Instruction::Add: - return true; - case Instruction::Mul: - case Instruction::Shl: - // Can only handle X*C and X << C. - return isa(I->getOperand(1)); - case Instruction::GetElementPtr: - return true; - default: - return false; - } -} - - -/// MatchOperationAddr - Given an instruction or constant expr, see if we can -/// fold the operation into the addressing mode. If so, update the addressing -/// mode and return true, otherwise return false without modifying AddrMode. -bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode, - unsigned Depth) { - // Avoid exponential behavior on extremely deep expression trees. - if (Depth >= 5) return false; - - switch (Opcode) { - case Instruction::PtrToInt: - // PtrToInt is always a noop, as we know that the int type is pointer sized. - return MatchAddr(AddrInst->getOperand(0), Depth); - case Instruction::IntToPtr: - // This inttoptr is a no-op if the integer type is pointer sized. - if (TLI.getValueType(AddrInst->getOperand(0)->getType()) == - TLI.getPointerTy()) - return MatchAddr(AddrInst->getOperand(0), Depth); - return false; - case Instruction::BitCast: - // BitCast is always a noop, and we can handle it as long as it is - // int->int or pointer->pointer (we don't want int<->fp or something). - if ((isa(AddrInst->getOperand(0)->getType()) || - isa(AddrInst->getOperand(0)->getType())) && - // Don't touch identity bitcasts. These were probably put here by LSR, - // and we don't want to mess around with them. Assume it knows what it - // is doing. - AddrInst->getOperand(0)->getType() != AddrInst->getType()) - return MatchAddr(AddrInst->getOperand(0), Depth); - return false; - case Instruction::Add: { - // Check to see if we can merge in the RHS then the LHS. If so, we win. - ExtAddrMode BackupAddrMode = AddrMode; - unsigned OldSize = AddrModeInsts.size(); - if (MatchAddr(AddrInst->getOperand(1), Depth+1) && - MatchAddr(AddrInst->getOperand(0), Depth+1)) - return true; - - // Restore the old addr mode info. - AddrMode = BackupAddrMode; - AddrModeInsts.resize(OldSize); - - // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. - if (MatchAddr(AddrInst->getOperand(0), Depth+1) && - MatchAddr(AddrInst->getOperand(1), Depth+1)) - return true; - - // Otherwise we definitely can't merge the ADD in. - AddrMode = BackupAddrMode; - AddrModeInsts.resize(OldSize); - break; - } - //case Instruction::Or: - // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. 
- //break; - case Instruction::Mul: - case Instruction::Shl: { - // Can only handle X*C and X << C. - ConstantInt *RHS = dyn_cast(AddrInst->getOperand(1)); - if (!RHS) return false; - int64_t Scale = RHS->getSExtValue(); - if (Opcode == Instruction::Shl) - Scale = 1 << Scale; - - return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth); - } - case Instruction::GetElementPtr: { - // Scan the GEP. We check it if it contains constant offsets and at most - // one variable offset. - int VariableOperand = -1; - unsigned VariableScale = 0; - - int64_t ConstantOffset = 0; - const TargetData *TD = TLI.getTargetData(); - gep_type_iterator GTI = gep_type_begin(AddrInst); - for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { - if (const StructType *STy = dyn_cast(*GTI)) { - const StructLayout *SL = TD->getStructLayout(STy); - unsigned Idx = - cast(AddrInst->getOperand(i))->getZExtValue(); - ConstantOffset += SL->getElementOffset(Idx); - } else { - uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType()); - if (ConstantInt *CI = dyn_cast(AddrInst->getOperand(i))) { - ConstantOffset += CI->getSExtValue()*TypeSize; - } else if (TypeSize) { // Scales of zero don't do anything. - // We only allow one variable index at the moment. - if (VariableOperand != -1) - return false; - - // Remember the variable index. - VariableOperand = i; - VariableScale = TypeSize; - } - } - } - - // A common case is for the GEP to only do a constant offset. In this case, - // just add it to the disp field and check validity. - if (VariableOperand == -1) { - AddrMode.BaseOffs += ConstantOffset; - if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){ - // Check to see if we can fold the base pointer in too. - if (MatchAddr(AddrInst->getOperand(0), Depth+1)) - return true; - } - AddrMode.BaseOffs -= ConstantOffset; - return false; - } - - // Save the valid addressing mode in case we can't match. - ExtAddrMode BackupAddrMode = AddrMode; - - // Check that this has no base reg yet. If so, we won't have a place to - // put the base of the GEP (assuming it is not a null ptr). - bool SetBaseReg = true; - if (isa(AddrInst->getOperand(0))) - SetBaseReg = false; // null pointer base doesn't need representation. - else if (AddrMode.HasBaseReg) - return false; // Base register already specified, can't match GEP. - else { - // Otherwise, we'll use the GEP base as the BaseReg. - AddrMode.HasBaseReg = true; - AddrMode.BaseReg = AddrInst->getOperand(0); - } - - // See if the scale and offset amount is valid for this target. - AddrMode.BaseOffs += ConstantOffset; - - if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, - Depth)) { - AddrMode = BackupAddrMode; - return false; - } - - // If we have a null as the base of the GEP, folding in the constant offset - // plus variable scale is all we can do. - if (!SetBaseReg) return true; - - // If this match succeeded, we know that we can form an address with the - // GepBase as the basereg. Match the base pointer of the GEP more - // aggressively by zeroing out BaseReg and rematching. If the base is - // (for example) another GEP, this allows merging in that other GEP into - // the addressing mode we're forming. 
- AddrMode.HasBaseReg = false; - AddrMode.BaseReg = 0; - bool Success = MatchAddr(AddrInst->getOperand(0), Depth+1); - assert(Success && "MatchAddr should be able to fill in BaseReg!"); - Success=Success; - return true; - } - } - return false; -} - -/// MatchAddr - If we can, try to add the value of 'Addr' into the current -/// addressing mode. If Addr can't be added to AddrMode this returns false and -/// leaves AddrMode unmodified. This assumes that Addr is either a pointer type -/// or intptr_t for the target. -/// -bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) { - if (ConstantInt *CI = dyn_cast(Addr)) { - // Fold in immediates if legal for the target. - AddrMode.BaseOffs += CI->getSExtValue(); - if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) - return true; - AddrMode.BaseOffs -= CI->getSExtValue(); - } else if (GlobalValue *GV = dyn_cast(Addr)) { - // If this is a global variable, try to fold it into the addressing mode. - if (AddrMode.BaseGV == 0) { - AddrMode.BaseGV = GV; - if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) - return true; - AddrMode.BaseGV = 0; - } - } else if (Instruction *I = dyn_cast(Addr)) { - ExtAddrMode BackupAddrMode = AddrMode; - unsigned OldSize = AddrModeInsts.size(); - - // Check to see if it is possible to fold this operation. - if (MatchOperationAddr(I, I->getOpcode(), Depth)) { - // Okay, it's possible to fold this. Check to see if it is actually - // *profitable* to do so. We use a simple cost model to avoid increasing - // register pressure too much. - if (I->hasOneUse() || - IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { - AddrModeInsts.push_back(I); - return true; - } - - // It isn't profitable to do this, roll back. - //cerr << "NOT FOLDING: " << *I; - AddrMode = BackupAddrMode; - AddrModeInsts.resize(OldSize); - } - } else if (ConstantExpr *CE = dyn_cast(Addr)) { - if (MatchOperationAddr(CE, CE->getOpcode(), Depth)) - return true; - } else if (isa(Addr)) { - // Null pointer gets folded without affecting the addressing mode. - return true; - } - - // Worse case, the target should support [reg] addressing modes. :) - if (!AddrMode.HasBaseReg) { - AddrMode.HasBaseReg = true; - AddrMode.BaseReg = Addr; - // Still check for legality in case the target supports [imm] but not [i+r]. - if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) - return true; - AddrMode.HasBaseReg = false; - AddrMode.BaseReg = 0; - } - - // If the base register is already taken, see if we can do [r+r]. - if (AddrMode.Scale == 0) { - AddrMode.Scale = 1; - AddrMode.ScaledReg = Addr; - if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) - return true; - AddrMode.Scale = 0; - AddrMode.ScaledReg = 0; - } - // Couldn't match. - return false; -} - - -/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified -/// inline asm call are due to memory operands. If so, return true, otherwise -/// return false. -static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, - const TargetLowering &TLI) { - std::vector - Constraints = IA->ParseConstraints(); - - unsigned ArgNo = 1; // ArgNo - The operand of the CallInst. - for (unsigned i = 0, e = Constraints.size(); i != e; ++i) { - TargetLowering::AsmOperandInfo OpInfo(Constraints[i]); - - // Compute the value type for each operand. 
- switch (OpInfo.Type) { - case InlineAsm::isOutput: - if (OpInfo.isIndirect) - OpInfo.CallOperandVal = CI->getOperand(ArgNo++); - break; - case InlineAsm::isInput: - OpInfo.CallOperandVal = CI->getOperand(ArgNo++); - break; - case InlineAsm::isClobber: - // Nothing to do. - break; - } - - // Compute the constraint code and ConstraintType to use. - TLI.ComputeConstraintToUse(OpInfo, SDValue(), - OpInfo.ConstraintType == TargetLowering::C_Memory); - - // If this asm operand is our Value*, and if it isn't an indirect memory - // operand, we can't fold it! - if (OpInfo.CallOperandVal == OpVal && - (OpInfo.ConstraintType != TargetLowering::C_Memory || - !OpInfo.isIndirect)) - return false; - } - - return true; -} - - -/// FindAllMemoryUses - Recursively walk all the uses of I until we find a -/// memory use. If we find an obviously non-foldable instruction, return true. -/// Add the ultimately found memory instructions to MemoryUses. -static bool FindAllMemoryUses(Instruction *I, - SmallVectorImpl > &MemoryUses, - SmallPtrSet &ConsideredInsts, - const TargetLowering &TLI) { - // If we already considered this instruction, we're done. - if (!ConsideredInsts.insert(I)) - return false; - - // If this is an obviously unfoldable instruction, bail out. - if (!MightBeFoldableInst(I)) - return true; - - // Loop over all the uses, recursively processing them. - for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); - UI != E; ++UI) { - if (LoadInst *LI = dyn_cast(*UI)) { - MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo())); - continue; - } - - if (StoreInst *SI = dyn_cast(*UI)) { - if (UI.getOperandNo() == 0) return true; // Storing addr, not into addr. - MemoryUses.push_back(std::make_pair(SI, UI.getOperandNo())); - continue; - } - - if (CallInst *CI = dyn_cast(*UI)) { - InlineAsm *IA = dyn_cast(CI->getCalledValue()); - if (IA == 0) return true; - - // If this is a memory operand, we're cool, otherwise bail out. - if (!IsOperandAMemoryOperand(CI, IA, I, TLI)) - return true; - continue; - } - - if (FindAllMemoryUses(cast(*UI), MemoryUses, ConsideredInsts, - TLI)) - return true; - } - - return false; -} - - -/// ValueAlreadyLiveAtInst - Retrn true if Val is already known to be live at -/// the use site that we're folding it into. If so, there is no cost to -/// include it in the addressing mode. KnownLive1 and KnownLive2 are two values -/// that we know are live at the instruction already. -bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, - Value *KnownLive2) { - // If Val is either of the known-live values, we know it is live! - if (Val == 0 || Val == KnownLive1 || Val == KnownLive2) - return true; - - // All values other than instructions and arguments (e.g. constants) are live. - if (!isa(Val) && !isa(Val)) return true; - - // If Val is a constant sized alloca in the entry block, it is live, this is - // true because it is just a reference to the stack/frame pointer, which is - // live for the whole function. - if (AllocaInst *AI = dyn_cast(Val)) - if (AI->isStaticAlloca()) - return true; - - // Check to see if this value is already used in the memory instruction's - // block. If so, it's already live into the block at the very least, so we - // can reasonably fold it. - BasicBlock *MemBB = MemoryInst->getParent(); - for (Value::use_iterator UI = Val->use_begin(), E = Val->use_end(); - UI != E; ++UI) - // We know that uses of arguments and instructions have to be instructions. 
- if (cast(*UI)->getParent() == MemBB) - return true; - - return false; -} - - - -/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing -/// mode of the machine to fold the specified instruction into a load or store -/// that ultimately uses it. However, the specified instruction has multiple -/// uses. Given this, it may actually increase register pressure to fold it -/// into the load. For example, consider this code: -/// -/// X = ... -/// Y = X+1 -/// use(Y) -> nonload/store -/// Z = Y+1 -/// load Z -/// -/// In this case, Y has multiple uses, and can be folded into the load of Z -/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to -/// be live at the use(Y) line. If we don't fold Y into load Z, we use one -/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the -/// number of computations either. -/// -/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If -/// X was live across 'load Z' for other reasons, we actually *would* want to -/// fold the addressing mode in the Z case. This would make Y die earlier. -bool AddressingModeMatcher:: -IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, - ExtAddrMode &AMAfter) { - if (IgnoreProfitability) return true; - - // AMBefore is the addressing mode before this instruction was folded into it, - // and AMAfter is the addressing mode after the instruction was folded. Get - // the set of registers referenced by AMAfter and subtract out those - // referenced by AMBefore: this is the set of values which folding in this - // address extends the lifetime of. - // - // Note that there are only two potential values being referenced here, - // BaseReg and ScaleReg (global addresses are always available, as are any - // folded immediates). - Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; - - // If the BaseReg or ScaledReg was referenced by the previous addrmode, their - // lifetime wasn't extended by adding this instruction. - if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) - BaseReg = 0; - if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) - ScaledReg = 0; - - // If folding this instruction (and it's subexprs) didn't extend any live - // ranges, we're ok with it. - if (BaseReg == 0 && ScaledReg == 0) - return true; - - // If all uses of this instruction are ultimately load/store/inlineasm's, - // check to see if their addressing modes will include this instruction. If - // so, we can fold it into all uses, so it doesn't matter if it has multiple - // uses. - SmallVector, 16> MemoryUses; - SmallPtrSet ConsideredInsts; - if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI)) - return false; // Has a non-memory, non-foldable use! - - // Now that we know that all uses of this instruction are part of a chain of - // computation involving only operations that could theoretically be folded - // into a memory use, loop over each of these uses and see if they could - // *actually* fold the instruction. - SmallVector MatchedAddrModeInsts; - for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { - Instruction *User = MemoryUses[i].first; - unsigned OpNo = MemoryUses[i].second; - - // Get the access type of this use. If the use isn't a pointer, we don't - // know what it accesses. 
- Value *Address = User->getOperand(OpNo); - if (!isa(Address->getType())) - return false; - const Type *AddressAccessTy = - cast(Address->getType())->getElementType(); - - // Do a match against the root of this address, ignoring profitability. This - // will tell us if the addressing mode for the memory operation will - // *actually* cover the shared instruction. - ExtAddrMode Result; - AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy, - MemoryInst, Result); - Matcher.IgnoreProfitability = true; - bool Success = Matcher.MatchAddr(Address, 0); - Success = Success; assert(Success && "Couldn't select *anything*?"); - - // If the match didn't cover I, then it won't be shared by it. - if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(), - I) == MatchedAddrModeInsts.end()) - return false; - - MatchedAddrModeInsts.clear(); - } - - return true; -} - - //===----------------------------------------------------------------------===// // Memory Optimization //===----------------------------------------------------------------------===// @@ -1152,7 +591,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, // If all the instructions matched are already in this BB, don't do anything. if (!AnyNonLocal) { - DEBUG(cerr << "CGP: Found local addrmode: " << AddrMode << "\n"); + DEBUG(errs() << "CGP: Found local addrmode: " << AddrMode << "\n"); return false; } @@ -1167,12 +606,15 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, // computation. Value *&SunkAddr = SunkAddrs[Addr]; if (SunkAddr) { - DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n"); + DEBUG(errs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " + << *MemoryInst); if (SunkAddr->getType() != Addr->getType()) SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt); } else { - DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n"); - const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType(); + DEBUG(errs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " + << *MemoryInst); + const Type *IntPtrTy = + TLI->getTargetData()->getIntPtrType(AccessTy->getContext()); Value *Result = 0; // Start with the scale value. @@ -1190,7 +632,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, } if (AddrMode.Scale != 1) V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy, - AddrMode.Scale), + AddrMode.Scale), "sunkaddr", InsertPt); Result = V; } @@ -1198,8 +640,11 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, // Add in the base register. 
   if (AddrMode.BaseReg) {
       Value *V = AddrMode.BaseReg;
-      if (V->getType() != IntPtrTy)
+      if (isa<PointerType>(V->getType()))
         V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
+      if (V->getType() != IntPtrTy)
+        V = CastInst::CreateIntegerCast(V, IntPtrTy, /*isSigned=*/true,
+                                        "sunkaddr", InsertPt);
       if (Result)
         Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
       else
@@ -1234,7 +679,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
   MemoryInst->replaceUsesOfWith(Addr, SunkAddr);
 
   if (Addr->use_empty())
-    EraseDeadInstructions(Addr);
+    RecursivelyDeleteTriviallyDeadInstructions(Addr);
   return true;
 }
 
@@ -1287,6 +732,43 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
   return MadeChange;
 }
 
+/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
+/// basic block as the load, unless conditions are unfavorable. This allows
+/// SelectionDAG to fold the extend into the load.
+///
+bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
+  // Look for a load being extended.
+  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
+  if (!LI) return false;
+
+  // If they're already in the same block, there's nothing to do.
+  if (LI->getParent() == I->getParent())
+    return false;
+
+  // If the load has other users and the truncate is not free, this probably
+  // isn't worthwhile.
+  if (!LI->hasOneUse() &&
+      TLI && !TLI->isTruncateFree(I->getType(), LI->getType()))
+    return false;
+
+  // Check whether the target supports casts folded into loads.
+  unsigned LType;
+  if (isa<ZExtInst>(I))
+    LType = ISD::ZEXTLOAD;
+  else {
+    assert(isa<SExtInst>(I) && "Unexpected ext type!");
+    LType = ISD::SEXTLOAD;
+  }
+  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
+    return false;
+
+  // Move the extend into the same block as the load, so that SelectionDAG
+  // can fold it.
+  I->removeFromParent();
+  I->insertAfter(LI);
+  return true;
+}
+
 bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
   BasicBlock *DefBB = I->getParent();
 
@@ -1368,17 +850,16 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
 bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
   bool MadeChange = false;
 
-  // Split all critical edges where the dest block has a PHI and where the phi
-  // has shared immediate operands.
+  // Split all critical edges where the dest block has a PHI.
   TerminatorInst *BBTI = BB.getTerminator();
   if (BBTI->getNumSuccessors() > 1) {
-    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
-      if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
-          isCriticalEdge(BBTI, i, true))
-        SplitEdgeNicely(BBTI, i, this);
+    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
+      BasicBlock *SuccBB = BBTI->getSuccessor(i);
+      if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
+        SplitEdgeNicely(BBTI, i, BackEdges, this);
+    }
   }
-
   // Keep track of non-local addresses that have been sunk into this block.
   // This allows us to avoid inserting duplicate code for blocks with multiple
   // load/stores of the same address.
@@ -1403,8 +884,10 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
         MadeChange |= Change;
       }
 
-      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I)))
+      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I))) {
+        MadeChange |= MoveExtToFormExtLoad(I);
         MadeChange |= OptimizeExtUses(I);
+      }
     } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
       MadeChange |= OptimizeCmpExpression(CI);
     } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
@@ -1429,15 +912,16 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
     } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
       // If we found an inline asm expression, and if the target knows how to
       // lower it to normal LLVM code, do so now.
-      if (TLI && isa<InlineAsm>(CI->getCalledValue()))
-        if (const TargetAsmInfo *TAI =
-              TLI->getTargetMachine().getTargetAsmInfo()) {
-          if (TAI->ExpandInlineAsm(CI))
-            BBI = BB.begin();
-          else
-            // Sink address computing for memory operands into the block.
-            MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
-        }
+      if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+        if (TLI->ExpandInlineAsm(CI)) {
+          BBI = BB.begin();
+          // Avoid processing instructions out of order, which could cause
+          // reuse before a value is defined.
+          SunkAddrs.clear();
+        } else
+          // Sink address computing for memory operands into the block.
+          MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
+      }
     }
   }
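
Usage note: CodeGenPrepare is constructed through createCodeGenPreparePass()
(defined in this file) and consults the TargetLowering it is handed for
addressing-mode legality in OptimizeMemoryInst, for noop-copy detection, and
for the extload legality check in the new MoveExtToFormExtLoad. Below is a
minimal sketch of how a driver of this vintage might schedule the pass before
instruction selection; the helper name addCodeGenPrepare and the PM/TM
parameters are illustrative assumptions, not part of this patch:

  #include "llvm/PassManager.h"
  #include "llvm/Target/TargetMachine.h"
  #include "llvm/Transforms/Scalar.h"

  // Illustrative only: register CodeGenPrepare on a (legacy) PassManager.
  // Passing the target's TargetLowering lets the pass query
  // isLegalAddressingMode() and isLoadExtLegal(); with a null TLI it still
  // merges mostly-empty blocks and splits critical edges.
  static void addCodeGenPrepare(llvm::PassManager &PM,
                                llvm::TargetMachine &TM) {
    PM.add(llvm::createCodeGenPreparePass(TM.getTargetLowering()));
  }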