X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FCodeGenPrepare.cpp;h=823caceb2f854f2fd5a358bbaf7e138f740ca4f3;hp=0306b06be76f582da8ceee3927dd84d8b86296bc;hb=90c579de5a383cee278acc3f7e7b9d0a656e6a35;hpb=2efbbb38ba7b9601202f2271301f07195dea8959

diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 0306b06be76..823caceb2f8 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -20,34 +20,51 @@
 #include "llvm/Function.h"
 #include "llvm/InlineAsm.h"
 #include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
 #include "llvm/Pass.h"
-#include "llvm/Target/TargetAsmInfo.h"
+#include "llvm/Analysis/ProfileInfo.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Utils/AddrModeMatcher.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/BuildLibCalls.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallSet.h"
+#include "llvm/Assembly/Writer.h"
 #include "llvm/Support/CallSite.h"
-#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/GetElementPtrTypeIterator.h"
 #include "llvm/Support/PatternMatch.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/IRBuilder.h"
 using namespace llvm;
 using namespace llvm::PatternMatch;
 
 namespace {
-  class VISIBILITY_HIDDEN CodeGenPrepare : public FunctionPass {
+  class CodeGenPrepare : public FunctionPass {
     /// TLI - Keep a pointer of a TargetLowering to consult for determining
     /// transformation profitability.
     const TargetLowering *TLI;
+    ProfileInfo *PFI;
+
+    /// BackEdges - Keep a set of all the loop back edges.
+    ///
+    SmallSet<std::pair<const BasicBlock*, const BasicBlock*>, 8> BackEdges;
   public:
     static char ID; // Pass identification, replacement for typeid
     explicit CodeGenPrepare(const TargetLowering *tli = 0)
-      : FunctionPass(&ID), TLI(tli) {}
+      : FunctionPass(ID), TLI(tli) {}
     bool runOnFunction(Function &F);
+
+    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.addPreserved<ProfileInfo>();
+    }
+
+    virtual void releaseMemory() {
+      BackEdges.clear();
+    }
+
   private:
     bool EliminateMostlyEmptyBlocks(Function &F);
     bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
@@ -57,26 +74,42 @@ namespace {
                             DenseMap<Value*,Value*> &SunkAddrs);
     bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
                                DenseMap<Value*,Value*> &SunkAddrs);
+    bool OptimizeCallInst(CallInst *CI);
+    bool MoveExtToFormExtLoad(Instruction *I);
     bool OptimizeExtUses(Instruction *I);
+    void findLoopBackEdges(const Function &F);
   };
 }
 
 char CodeGenPrepare::ID = 0;
-static RegisterPass<CodeGenPrepare> X("codegenprepare",
-                                      "Optimize for code generation");
+INITIALIZE_PASS(CodeGenPrepare, "codegenprepare",
+                "Optimize for code generation", false, false);
 
 FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
   return new CodeGenPrepare(TLI);
 }
 
+/// findLoopBackEdges - Do a DFS walk to find loop back edges.
+///
+void CodeGenPrepare::findLoopBackEdges(const Function &F) {
+  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
+  FindFunctionBackedges(F, Edges);
+
+  BackEdges.insert(Edges.begin(), Edges.end());
+}
+
 bool CodeGenPrepare::runOnFunction(Function &F) {
   bool EverMadeChange = false;
 
+  PFI = getAnalysisIfAvailable<ProfileInfo>();
   // First pass, eliminate blocks that contain only PHI nodes and an
   // unconditional branch.
   EverMadeChange |= EliminateMostlyEmptyBlocks(F);
 
+  // Now find loop back edges.
+  findLoopBackEdges(F);
+
   bool MadeChange = true;
   while (MadeChange) {
     MadeChange = false;
@@ -87,10 +120,11 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
   return EverMadeChange;
 }
 
-/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes
-/// and an unconditional branch.  Passes before isel (e.g. LSR/loopsimplify)
-/// often split edges in ways that are non-optimal for isel.  Start by
-/// eliminating these blocks so we can split them the way we want them.
+/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
+/// debug info directives, and an unconditional branch.  Passes before isel
+/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
+/// isel.  Start by eliminating these blocks so we can split them the way we
+/// want them.
 bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
   bool MadeChange = false;
   // Note that this intentionally skips the entry block.
@@ -102,12 +136,18 @@ bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
     if (!BI || !BI->isUnconditional())
       continue;
 
-    // If the instruction before the branch isn't a phi node, then other stuff
-    // is happening here.
+    // If the instruction before the branch (skipping debug info) isn't a phi
+    // node, then other stuff is happening here.
     BasicBlock::iterator BBI = BI;
     if (BBI != BB->begin()) {
       --BBI;
-      if (!isa<PHINode>(BBI)) continue;
+      while (isa<DbgInfoIntrinsic>(BBI)) {
+        if (BBI == BB->begin())
+          break;
+        --BBI;
+      }
+      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
+        continue;
     }
 
     // Do not break infinite loops.
@@ -134,7 +174,7 @@ bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
   // don't mess around with them.
   BasicBlock::const_iterator BBI = BB->begin();
   while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
-    for (Value::use_const_iterator UI = PN->use_begin(), E = PN->use_end();
+    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
          UI != E; ++UI) {
       const Instruction *User = cast<Instruction>(*UI);
       if (User->getParent() != DestBB || !isa<PHINode>(User))
@@ -200,32 +240,23 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
   BranchInst *BI = cast<BranchInst>(BB->getTerminator());
   BasicBlock *DestBB = BI->getSuccessor(0);
 
-  DOUT << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB;
+  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
 
   // If the destination block has a single pred, then this is a trivial edge,
   // just collapse it.
-  if (DestBB->getSinglePredecessor()) {
-    // If DestBB has single-entry PHI nodes, fold them.
-    while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
-      Value *NewVal = PN->getIncomingValue(0);
-      // Replace self referencing PHI with undef, it must be dead.
-      if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
-      PN->replaceAllUsesWith(NewVal);
-      PN->eraseFromParent();
+  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
+    if (SinglePred != DestBB) {
+      // Remember if SinglePred was the entry block of the function.  If so, we
+      // will need to move BB back to the entry position.
+      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
+      MergeBasicBlockIntoOnlyPred(DestBB, this);
+
+      if (isEntry && BB != &BB->getParent()->getEntryBlock())
+        BB->moveBefore(&BB->getParent()->getEntryBlock());
+
+      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
+      return;
     }
-
-    // Splice all the PHI nodes from BB over to DestBB.
-    DestBB->getInstList().splice(DestBB->begin(), BB->getInstList(),
-                                 BB->begin(), BI);
-
-    // Anything that branched to BB now branches to DestBB.
-    BB->replaceAllUsesWith(DestBB);
-
-    // Nuke BB.
-    BB->eraseFromParent();
-
-    DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
-    return;
   }
 
   // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
@@ -260,86 +291,130 @@ void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
   // The PHIs are now updated, change everything that refers to BB to use
   // DestBB and remove BB.
   BB->replaceAllUsesWith(DestBB);
+  if (PFI) {
+    PFI->replaceAllUses(BB, DestBB);
+    PFI->removeEdge(ProfileInfo::getEdge(BB, DestBB));
+  }
   BB->eraseFromParent();
 
-  DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
+  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
 }
 
-
-/// SplitEdgeNicely - Split the critical edge from TI to its specified
-/// successor if it will improve codegen.  We only do this if the successor has
-/// phi nodes (otherwise critical edges are ok).  If there is already another
-/// predecessor of the succ that is empty (and thus has no phi nodes), use it
-/// instead of introducing a new block.
-static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
-  BasicBlock *TIBB = TI->getParent();
-  BasicBlock *Dest = TI->getSuccessor(SuccNum);
-  assert(isa<PHINode>(Dest->begin()) &&
-         "This should only be called if Dest has a PHI!");
-
-  // As a hack, never split backedges of loops.  Even though the copy for any
-  // PHIs inserted on the backedge would be dead for exits from the loop, we
-  // assume that the cost of *splitting* the backedge would be too high.
-  if (Dest == TIBB)
-    return;
-
+/// FindReusablePredBB - Check all of the predecessors of the block DestPHI
+/// lives in to see if there is a block that we can reuse as a critical edge
+/// from TIBB.
+static BasicBlock *FindReusablePredBB(PHINode *DestPHI, BasicBlock *TIBB) {
+  BasicBlock *Dest = DestPHI->getParent();
+
   /// TIPHIValues - This array is lazily computed to determine the values of
   /// PHIs in Dest that TI would provide.
   SmallVector<Value*, 32> TIPHIValues;
-
+
+  /// TIBBEntryNo - This is a cache to speed up pred queries for TIBB.
+  unsigned TIBBEntryNo = 0;
+
   // Check to see if Dest has any blocks that can be used as a split edge for
   // this terminator.
-  for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
-    BasicBlock *Pred = *PI;
+  for (unsigned pi = 0, e = DestPHI->getNumIncomingValues(); pi != e; ++pi) {
+    BasicBlock *Pred = DestPHI->getIncomingBlock(pi);
     // To be usable, the pred has to end with an uncond branch to the dest.
     BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
-    if (!PredBr || !PredBr->isUnconditional() ||
-        // Must be empty other than the branch.
-        &Pred->front() != PredBr ||
-        // Cannot be the entry block; its label does not get emitted.
-        Pred == &(Dest->getParent()->getEntryBlock()))
+    if (!PredBr || !PredBr->isUnconditional())
       continue;
-
+    // Must be empty other than the branch and debug info.
+    BasicBlock::iterator I = Pred->begin();
+    while (isa<DbgInfoIntrinsic>(I))
+      I++;
+    if (&*I != PredBr)
+      continue;
+    // Cannot be the entry block; its label does not get emitted.
+    if (Pred == &Dest->getParent()->getEntryBlock())
+      continue;
+
     // Finally, since we know that Dest has phi nodes in it, we have to make
-    // sure that jumping to Pred will have the same affect as going to Dest in
+    // sure that jumping to Pred will have the same effect as going to Dest in
     // terms of PHI values.
     PHINode *PN;
     unsigned PHINo = 0;
+    unsigned PredEntryNo = pi;
     bool FoundMatch = true;
     for (BasicBlock::iterator I = Dest->begin();
          (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
-      if (PHINo == TIPHIValues.size())
-        TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));
-
+      if (PHINo == TIPHIValues.size()) {
+        if (PN->getIncomingBlock(TIBBEntryNo) != TIBB)
+          TIBBEntryNo = PN->getBasicBlockIndex(TIBB);
+        TIPHIValues.push_back(PN->getIncomingValue(TIBBEntryNo));
+      }
+
       // If the PHI entry doesn't work, we can't use this pred.
-      if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
+      if (PN->getIncomingBlock(PredEntryNo) != Pred)
+        PredEntryNo = PN->getBasicBlockIndex(Pred);
+
+      if (TIPHIValues[PHINo] != PN->getIncomingValue(PredEntryNo)) {
         FoundMatch = false;
         break;
       }
     }
-
+
     // If we found a workable predecessor, change TI to branch to Succ.
-    if (FoundMatch) {
-      Dest->removePredecessor(TIBB);
-      TI->setSuccessor(SuccNum, Pred);
+    if (FoundMatch)
+      return Pred;
+  }
+  return 0;
+}
+
+
+/// SplitEdgeNicely - Split the critical edge from TI to its specified
+/// successor if it will improve codegen.  We only do this if the successor has
+/// phi nodes (otherwise critical edges are ok).  If there is already another
+/// predecessor of the succ that is empty (and thus has no phi nodes), use it
+/// instead of introducing a new block.
+static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum,
+                            SmallSet<std::pair<const BasicBlock*,
+                                               const BasicBlock*>, 8> &BackEdges,
+                            Pass *P) {
+  BasicBlock *TIBB = TI->getParent();
+  BasicBlock *Dest = TI->getSuccessor(SuccNum);
+  assert(isa<PHINode>(Dest->begin()) &&
+         "This should only be called if Dest has a PHI!");
+  PHINode *DestPHI = cast<PHINode>(Dest->begin());
+
+  // Do not split edges to EH landing pads.
+  if (InvokeInst *Invoke = dyn_cast<InvokeInst>(TI))
+    if (Invoke->getSuccessor(1) == Dest)
       return;
-    }
-  }
+
+  // As a hack, never split backedges of loops.  Even though the copy for any
+  // PHIs inserted on the backedge would be dead for exits from the loop, we
+  // assume that the cost of *splitting* the backedge would be too high.
+  if (BackEdges.count(std::make_pair(TIBB, Dest)))
+    return;
+
+  if (BasicBlock *ReuseBB = FindReusablePredBB(DestPHI, TIBB)) {
+    ProfileInfo *PFI = P->getAnalysisIfAvailable<ProfileInfo>();
+    if (PFI)
+      PFI->splitEdge(TIBB, Dest, ReuseBB);
+    Dest->removePredecessor(TIBB);
+    TI->setSuccessor(SuccNum, ReuseBB);
+    return;
   }
 
   SplitCriticalEdge(TI, SuccNum, P, true);
 }
 
+
 /// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
-/// copy (e.g. it's casting from one pointer type to another, int->uint, or
-/// int->sbyte on PPC), sink it into user blocks to reduce the number of virtual
+/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
+/// sink it into user blocks to reduce the number of virtual
 /// registers that must be created and coalesced.
 ///
 /// Return true if any changes are made.
 ///
 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
   // If this is a noop copy,
-  MVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
-  MVT DstVT = TLI.getValueType(CI->getType());
+  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
+  EVT DstVT = TLI.getValueType(CI->getType());
 
   // This is an fp<->int conversion?
   if (SrcVT.isInteger() != DstVT.isInteger())
@@ -352,10 +427,10 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
   // If these values will be promoted, find out what they will be promoted
   // to.  This helps us consider truncates on PPC as noop copies when they
   // are.
-  if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
-    SrcVT = TLI.getTypeToTransformTo(SrcVT);
-  if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
-    DstVT = TLI.getTypeToTransformTo(DstVT);
+  if (TLI.getTypeAction(CI->getContext(), SrcVT) == TargetLowering::Promote)
+    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
+  if (TLI.getTypeAction(CI->getContext(), DstVT) == TargetLowering::Promote)
+    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
 
   // If, after promotion, these are the same types, this is a noop copy.
   if (SrcVT != DstVT)
@@ -376,8 +451,7 @@ static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
     // appropriate predecessor block.
     BasicBlock *UserBB = User->getParent();
     if (PHINode *PN = dyn_cast<PHINode>(User)) {
-      unsigned OpVal = UI.getOperandNo()/2;
-      UserBB = PN->getIncomingBlock(OpVal);
+      UserBB = PN->getIncomingBlock(UI);
     }
 
     // Preincrement use iterator so we don't invalidate it.
@@ -449,7 +523,8 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
       BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();
 
       InsertedCmp =
-        CmpInst::Create(CI->getOpcode(), CI->getPredicate(), CI->getOperand(0),
+        CmpInst::Create(CI->getOpcode(),
+                        CI->getPredicate(), CI->getOperand(0),
                         CI->getOperand(1), "", InsertPt);
       MadeChange = true;
     }
@@ -465,374 +540,48 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
   return MadeChange;
 }
 
-/// EraseDeadInstructions - Erase any dead instructions, recursively.
-static void EraseDeadInstructions(Value *V) {
-  Instruction *I = dyn_cast<Instruction>(V);
-  if (!I || !I->use_empty()) return;
-
-  SmallPtrSet<Instruction*, 16> Insts;
-  Insts.insert(I);
-
-  while (!Insts.empty()) {
-    I = *Insts.begin();
-    Insts.erase(I);
-    if (isInstructionTriviallyDead(I)) {
-      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
-        if (Instruction *U = dyn_cast<Instruction>(I->getOperand(i)))
-          Insts.insert(U);
-      I->eraseFromParent();
-    }
-  }
-}
-
-//===----------------------------------------------------------------------===//
-// Addressing Mode Analysis and Optimization
-//===----------------------------------------------------------------------===//
-
-namespace {
-  /// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
-  /// which holds actual Value*'s for register values.
-  struct ExtAddrMode : public TargetLowering::AddrMode {
-    Value *BaseReg;
-    Value *ScaledReg;
-    ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
-    void print(OStream &OS) const;
-    void dump() const {
-      print(cerr);
-      cerr << '\n';
-    }
-  };
-} // end anonymous namespace
-
-static OStream &operator<<(OStream &OS, const ExtAddrMode &AM) {
-  AM.print(OS);
-  return OS;
-}
-
-void ExtAddrMode::print(OStream &OS) const {
-  bool NeedPlus = false;
-  OS << "[";
-  if (BaseGV)
-    OS << (NeedPlus ? " + " : "")
-       << "GV:%" << BaseGV->getName(), NeedPlus = true;
-
-  if (BaseOffs)
-    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;
-
-  if (BaseReg)
-    OS << (NeedPlus ? " + " : "")
-       << "Base:%" << BaseReg->getName(), NeedPlus = true;
-  if (Scale)
-    OS << (NeedPlus ? " + " : "")
-       << Scale << "*%" << ScaledReg->getName(), NeedPlus = true;
-
-  OS << ']';
-}
-
 namespace {
-/// AddressingModeMatcher - This class exposes a single public method, which is
-/// used to construct a "maximal munch" of the addressing mode for the target
-/// specified by TLI for an access to "V" with an access type of AccessTy.  This
-/// returns the addressing mode that is actually matched by value, but also
-/// returns the list of instructions involved in that addressing computation in
-/// AddrModeInsts.
-class AddressingModeMatcher {
-  SmallVectorImpl<Instruction*> &AddrModeInsts;
-  const TargetLowering &TLI;
-  const Type *AccessTy;
-  ExtAddrMode &AddrMode;
-  AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
-                        const TargetLowering &T, const Type *AT,ExtAddrMode &AM)
-    : AddrModeInsts(AMI), TLI(T), AccessTy(AT), AddrMode(AM) {}
-public:
-
-  static ExtAddrMode Match(Value *V, const Type *AccessTy,
-                           SmallVectorImpl<Instruction*> &AddrModeInsts,
-                           const TargetLowering &TLI) {
-    ExtAddrMode Result;
-
-    bool Success =
-      AddressingModeMatcher(AddrModeInsts,TLI,AccessTy,Result).MatchAddr(V, 0);
-    Success = Success; assert(Success && "Couldn't select *anything*?");
-    return Result;
+class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
+protected:
+  void replaceCall(Value *With) {
+    CI->replaceAllUsesWith(With);
+    CI->eraseFromParent();
+  }
+  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const {
+    if (ConstantInt *SizeCI =
+          dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
+      return SizeCI->isAllOnesValue();
+    return false;
   }
-private:
-  bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
-  bool MatchAddr(Value *V, unsigned Depth);
-  bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth);
 };
 } // end anonymous namespace
 
-/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
-/// Return true and update AddrMode if this addr mode is legal for the target,
-/// false if not.
-bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
-                                             unsigned Depth) {
-  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
-  // mode.  Just process that directly.
-  if (Scale == 1)
-    return MatchAddr(ScaleReg, Depth);
-
-  // If the scale is 0, it takes nothing to add this.
-  if (Scale == 0)
+bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
+  // Lower all uses of llvm.objectsize.*
+  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
+  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
+    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
+    const Type *ReturnTy = CI->getType();
+    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
+    CI->replaceAllUsesWith(RetVal);
+    CI->eraseFromParent();
     return true;
-
-  // If we already have a scale of this value, we can add to it, otherwise, we
-  // need an available scale field.
-  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
-    return false;
-
-  ExtAddrMode TestAddrMode = AddrMode;
-
-  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
-  // [A+B + A*7] -> [B+A*8].
-  TestAddrMode.Scale += Scale;
-  TestAddrMode.ScaledReg = ScaleReg;
-
-  // If the new address isn't legal, bail out.
-  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
-    return false;
-
-  // It was legal, so commit it.
-  AddrMode = TestAddrMode;
-
-  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
-  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
-  // X*Scale + C*Scale to addr mode.
-  ConstantInt *CI; Value *AddLHS;
-  if (match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
-    TestAddrMode.ScaledReg = AddLHS;
-    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
-
-    // If this addressing mode is legal, commit it and remember that we folded
-    // this instruction.
-    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
-      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
-      AddrMode = TestAddrMode;
-      return true;
-    }
   }
-
-  // Otherwise, not (x+c)*scale, just return what we have.
-  return true;
-}
-
-
-/// MatchOperationAddr - Given an instruction or constant expr, see if we can
-/// fold the operation into the addressing mode.  If so, update the addressing
-/// mode and return true, otherwise return false without modifying AddrMode.
-bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
-                                               unsigned Depth) {
-  // Avoid exponential behavior on extremely deep expression trees.
-  if (Depth >= 5) return false;
+  // From here on out we're working with named functions.
+  if (CI->getCalledFunction() == 0) return false;
 
-  switch (Opcode) {
-  case Instruction::PtrToInt:
-    // PtrToInt is always a noop, as we know that the int type is pointer sized.
-    return MatchAddr(AddrInst->getOperand(0), Depth);
-  case Instruction::IntToPtr:
-    // This inttoptr is a no-op if the integer type is pointer sized.
-    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
-        TLI.getPointerTy())
-      return MatchAddr(AddrInst->getOperand(0), Depth);
-    return false;
-  case Instruction::BitCast:
-    // BitCast is always a noop, and we can handle it as long as it is
-    // int->int or pointer->pointer (we don't want int<->fp or something).
-    if ((isa<IntegerType>(AddrInst->getOperand(0)->getType()) ||
-         isa<PointerType>(AddrInst->getOperand(0)->getType())) &&
-        // Don't touch identity bitcasts.  These were probably put here by LSR,
-        // and we don't want to mess around with them.  Assume it knows what it
-        // is doing.
-        AddrInst->getOperand(0)->getType() != AddrInst->getType())
-      return MatchAddr(AddrInst->getOperand(0), Depth);
-    return false;
-  case Instruction::Add: {
-    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
-    ExtAddrMode BackupAddrMode = AddrMode;
-    unsigned OldSize = AddrModeInsts.size();
-    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
-        MatchAddr(AddrInst->getOperand(0), Depth+1))
-      return true;
-
-    // Restore the old addr mode info.
-    AddrMode = BackupAddrMode;
-    AddrModeInsts.resize(OldSize);
-
-    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
-    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
-        MatchAddr(AddrInst->getOperand(1), Depth+1))
-      return true;
-
-    // Otherwise we definitely can't merge the ADD in.
-    AddrMode = BackupAddrMode;
-    AddrModeInsts.resize(OldSize);
-    break;
-  }
-  case Instruction::Or: {
-    //ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
-    //if (!RHS) break;
-    // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
-    break;
-  }
-  case Instruction::Mul:
-  case Instruction::Shl: {
-    // Can only handle X*C and X << C.
-    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
-    if (!RHS) return false;
-    int64_t Scale = RHS->getSExtValue();
-    if (Opcode == Instruction::Shl)
-      Scale = 1 << Scale;
-
-    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
-  }
-  case Instruction::GetElementPtr: {
-    // Scan the GEP.  We check it if it contains constant offsets and at most
-    // one variable offset.
-    int VariableOperand = -1;
-    unsigned VariableScale = 0;
-
-    int64_t ConstantOffset = 0;
-    const TargetData *TD = TLI.getTargetData();
-    gep_type_iterator GTI = gep_type_begin(AddrInst);
-    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
-      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
-        const StructLayout *SL = TD->getStructLayout(STy);
-        unsigned Idx =
-          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
-        ConstantOffset += SL->getElementOffset(Idx);
-      } else {
-        uint64_t TypeSize = TD->getABITypeSize(GTI.getIndexedType());
-        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
-          ConstantOffset += CI->getSExtValue()*TypeSize;
-        } else if (TypeSize) {  // Scales of zero don't do anything.
-          // We only allow one variable index at the moment.
-          if (VariableOperand != -1)
-            return false;
-
-          // Remember the variable index.
-          VariableOperand = i;
-          VariableScale = TypeSize;
-        }
-      }
-    }
-
-    // A common case is for the GEP to only do a constant offset.  In this case,
-    // just add it to the disp field and check validity.
-    if (VariableOperand == -1) {
-      AddrMode.BaseOffs += ConstantOffset;
-      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){
-        // Check to see if we can fold the base pointer in too.
-        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
-          return true;
-      }
-      AddrMode.BaseOffs -= ConstantOffset;
-      return false;
-    }
-
-    // Save the valid addressing mode in case we can't match.
-    ExtAddrMode BackupAddrMode = AddrMode;
-
-    // Check that this has no base reg yet.  If so, we won't have a place to
-    // put the base of the GEP (assuming it is not a null ptr).
-    bool SetBaseReg = true;
-    if (isa<ConstantPointerNull>(AddrInst->getOperand(0)))
-      SetBaseReg = false;   // null pointer base doesn't need representation.
-    else if (AddrMode.HasBaseReg)
-      return false;  // Base register already specified, can't match GEP.
-    else {
-      // Otherwise, we'll use the GEP base as the BaseReg.
-      AddrMode.HasBaseReg = true;
-      AddrMode.BaseReg = AddrInst->getOperand(0);
-    }
-
-    // See if the scale and offset amount is valid for this target.
-    AddrMode.BaseOffs += ConstantOffset;
-
-    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
-                          Depth)) {
-      AddrMode = BackupAddrMode;
-      return false;
-    }
-
-    // If we have a null as the base of the GEP, folding in the constant offset
-    // plus variable scale is all we can do.
-    if (!SetBaseReg) return true;
-
-    // If this match succeeded, we know that we can form an address with the
-    // GepBase as the basereg.  Match the base pointer of the GEP more
-    // aggressively by zeroing out BaseReg and rematching.  If the base is
-    // (for example) another GEP, this allows merging in that other GEP into
-    // the addressing mode we're forming.
-    AddrMode.HasBaseReg = false;
-    AddrMode.BaseReg = 0;
-    bool Success = MatchAddr(AddrInst->getOperand(0), Depth+1);
-    assert(Success && "MatchAddr should be able to fill in BaseReg!");
-    Success=Success;
-    return true;
-  }
-  }
-  return false;
-}
-
-/// MatchAddr - If we can, try to add the value of 'Addr' into the current
-/// addressing mode.  If Addr can't be added to AddrMode this returns false and
-/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
-/// or intptr_t for the target.
-///
-bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
-  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
-    // Fold in immediates if legal for the target.
-    AddrMode.BaseOffs += CI->getSExtValue();
-    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
-      return true;
-    AddrMode.BaseOffs -= CI->getSExtValue();
-  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
-    // If this is a global variable, try to fold it into the addressing mode.
-    if (AddrMode.BaseGV == 0) {
-      AddrMode.BaseGV = GV;
-      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
-        return true;
-      AddrMode.BaseGV = 0;
-    }
-  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
-    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
-      AddrModeInsts.push_back(I);
-      return true;
-    }
-  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
-    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
-      return true;
-  } else if (isa<ConstantPointerNull>(Addr)) {
-    // Null pointer gets folded without affecting the addressing mode.
-    return true;
-  }
-
-  // Worse case, the target should support [reg] addressing modes. :)
-  if (!AddrMode.HasBaseReg) {
-    AddrMode.HasBaseReg = true;
-    // Still check for legality in case the target supports [imm] but not [i+r].
-    if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
-      AddrMode.BaseReg = Addr;
-      return true;
-    }
-    AddrMode.HasBaseReg = false;
-  }
-
-  // If the base register is already taken, see if we can do [r+r].
-  if (AddrMode.Scale == 0) {
-    AddrMode.Scale = 1;
-    if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
-      AddrMode.ScaledReg = Addr;
-      return true;
-    }
-    AddrMode.Scale = 0;
-  }
-
-  // Couldn't match.
-  return false;
+  // We'll need TargetData from here on out.
+  const TargetData *TD = TLI ? TLI->getTargetData() : 0;
+  if (!TD) return false;
+
+  // Lower all default uses of _chk calls.  This is very similar
+  // to what InstCombineCalls does, but here we are only lowering calls
+  // that have the default "don't know" as the objectsize.  Anything else
+  // should be left alone.
+  CodeGenPrepareFortifiedLibCalls Simplifier;
+  return Simplifier.fold(CI, TD);
 }
 
-
-
 //===----------------------------------------------------------------------===//
 // Memory Optimization
 //===----------------------------------------------------------------------===//
@@ -845,7 +594,7 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
   return false;
 }
 
-/// OptimizeMemoryInst - Load and Store Instructions have often have
+/// OptimizeMemoryInst - Load and Store Instructions often have
 /// addressing modes that can do significant amounts of computation.  As such,
 /// instruction selection will try to get the load or store to do as much
 /// computation as possible for the program.  The problem is that isel can only
@@ -854,19 +603,19 @@
 ///
 /// This method is used to optimize both load/store and inline asms with memory
 /// operands.
-bool CodeGenPrepare::OptimizeMemoryInst(Instruction *LdStInst, Value *Addr,
+bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                                         const Type *AccessTy,
                                         DenseMap<Value*,Value*> &SunkAddrs) {
   // Figure out what addressing mode will be built up for this operation.
   SmallVector<Instruction*, 16> AddrModeInsts;
-  ExtAddrMode AddrMode =
-    AddressingModeMatcher::Match(Addr, AccessTy, AddrModeInsts, *TLI);
+  ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,MemoryInst,
+                                                      AddrModeInsts, *TLI);
 
   // Check to see if any of the instructions supersumed by this addr mode are
   // non-local to I's BB.
   bool AnyNonLocal = false;
   for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
-    if (IsNonLocalValue(AddrModeInsts[i], LdStInst->getParent())) {
+    if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
@@ -874,14 +623,14 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *LdStInst, Value *Addr,
 
   // If all the instructions matched are already in this BB, don't do anything.
   if (!AnyNonLocal) {
-    DEBUG(cerr << "CGP: Found local addrmode: " << AddrMode << "\n");
+    DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
     return false;
   }
 
   // Insert this computation right after this user.  Since our caller is
   // scanning from the top of the BB to the bottom, reuse of the expr are
   // guaranteed to happen later.
-  BasicBlock::iterator InsertPt = LdStInst;
+  BasicBlock::iterator InsertPt = MemoryInst;
 
   // Now that we determined the addressing expression we want to use and know
   // that we have to sink it into this block.  Check to see if we have already
@@ -889,20 +638,39 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *LdStInst, Value *Addr,
   // computation.
   Value *&SunkAddr = SunkAddrs[Addr];
   if (SunkAddr) {
-    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n");
+    DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
+                 << *MemoryInst);
     if (SunkAddr->getType() != Addr->getType())
       SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
   } else {
-    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n");
-    const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();
+    DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
+                 << *MemoryInst);
+    const Type *IntPtrTy =
+          TLI->getTargetData()->getIntPtrType(AccessTy->getContext());
 
     Value *Result = 0;
-    // Start with the scale value.
+
+    // Start with the base register. Do this first so that subsequent address
+    // matching finds it last, which will prevent it from trying to match it
+    // as the scaled value in case it happens to be a mul. That would be
+    // problematic if we've sunk a different mul for the scale, because then
+    // we'd end up sinking both muls.
+    if (AddrMode.BaseReg) {
+      Value *V = AddrMode.BaseReg;
+      if (V->getType()->isPointerTy())
+        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
+      if (V->getType() != IntPtrTy)
+        V = CastInst::CreateIntegerCast(V, IntPtrTy, /*isSigned=*/true,
+                                        "sunkaddr", InsertPt);
+      Result = V;
+    }
+
+    // Add the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
-      } else if (isa<PointerType>(V->getType())) {
+      } else if (V->getType()->isPointerTy()) {
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
@@ -912,16 +680,8 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *LdStInst, Value *Addr,
      }
      if (AddrMode.Scale != 1)
        V = BinaryOperator::CreateMul(V, ConstantInt::get(IntPtrTy,
-                                                          AddrMode.Scale),
+                                                                AddrMode.Scale),
                                      "sunkaddr", InsertPt);
-      Result = V;
-    }
-
-    // Add in the base register.
-    if (AddrMode.BaseReg) {
-      Value *V = AddrMode.BaseReg;
-      if (V->getType() != IntPtrTy)
-        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
       if (Result)
         Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
       else
@@ -953,10 +713,14 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *LdStInst, Value *Addr,
       SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",InsertPt);
   }
 
-  LdStInst->replaceUsesOfWith(Addr, SunkAddr);
+  MemoryInst->replaceUsesOfWith(Addr, SunkAddr);
 
-  if (Addr->use_empty())
-    EraseDeadInstructions(Addr);
+  if (Addr->use_empty()) {
+    RecursivelyDeleteTriviallyDeadInstructions(Addr);
+    // This address is now available for reassignment, so erase the table entry;
+    // we don't want to match some completely different instruction.
+    SunkAddrs[Addr] = 0;
+  }
   return true;
 }
 
@@ -996,8 +760,7 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
     }
 
     // Compute the constraint code and ConstraintType to use.
-    TLI->ComputeConstraintToUse(OpInfo, SDValue(),
-                             OpInfo.ConstraintType == TargetLowering::C_Memory);
+    TLI->ComputeConstraintToUse(OpInfo, SDValue());
 
     if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
         OpInfo.isIndirect) {
@@ -1009,6 +772,43 @@ bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
   return MadeChange;
 }
 
+/// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
+/// basic block as the load, unless conditions are unfavorable. This allows
+/// SelectionDAG to fold the extend into the load.
+///
+bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
+  // Look for a load being extended.
+  LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
+  if (!LI) return false;
+
+  // If they're already in the same block, there's nothing to do.
+  if (LI->getParent() == I->getParent())
+    return false;
+
+  // If the load has other users and the truncate is not free, this probably
+  // isn't worthwhile.
+  if (!LI->hasOneUse() &&
+      TLI && !TLI->isTruncateFree(I->getType(), LI->getType()))
+    return false;
+
+  // Check whether the target supports casts folded into loads.
+  unsigned LType;
+  if (isa<ZExtInst>(I))
+    LType = ISD::ZEXTLOAD;
+  else {
+    assert(isa<SExtInst>(I) && "Unexpected ext type!");
+    LType = ISD::SEXTLOAD;
+  }
+  if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
+    return false;
+
+  // Move the extend into the same block as the load, so that SelectionDAG
+  // can fold it.
+  I->removeFromParent();
+  I->insertAfter(LI);
+  return true;
+}
+
 bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
   BasicBlock *DefBB = I->getParent();
 
@@ -1090,17 +890,16 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
 bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
   bool MadeChange = false;
 
-  // Split all critical edges where the dest block has a PHI and where the phi
-  // has shared immediate operands.
+  // Split all critical edges where the dest block has a PHI.
   TerminatorInst *BBTI = BB.getTerminator();
-  if (BBTI->getNumSuccessors() > 1) {
-    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
-      if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
-          isCriticalEdge(BBTI, i, true))
-        SplitEdgeNicely(BBTI, i, this);
+  if (BBTI->getNumSuccessors() > 1 && !isa<IndirectBrInst>(BBTI)) {
+    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i) {
+      BasicBlock *SuccBB = BBTI->getSuccessor(i);
+      if (isa<PHINode>(SuccBB->begin()) && isCriticalEdge(BBTI, i, true))
+        SplitEdgeNicely(BBTI, i, BackEdges, this);
+    }
   }
-
 
   // Keep track of non-local addresses that have been sunk into this block.
   // This allows us to avoid inserting duplicate code for blocks with multiple
   // load/stores of the same address.
@@ -1125,8 +924,10 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
         MadeChange |= Change;
       }
 
-      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I)))
+      if (!Change && (isa<ZExtInst>(I) || isa<SExtInst>(I))) {
+        MadeChange |= MoveExtToFormExtLoad(I);
         MadeChange |= OptimizeExtUses(I);
+      }
     } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
       MadeChange |= OptimizeCmpExpression(CI);
     } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
@@ -1151,15 +952,20 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
    } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
      // If we found an inline asm expession, and if the target knows how to
      // lower it to normal LLVM code, do so now.
-      if (TLI && isa<InlineAsm>(CI->getCalledValue()))
-        if (const TargetAsmInfo *TAI =
-            TLI->getTargetMachine().getTargetAsmInfo()) {
-          if (TAI->ExpandInlineAsm(CI))
-            BBI = BB.begin();
-          else
-            // Sink address computing for memory operands into the block.
-            MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
-        }
+      if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+        if (TLI->ExpandInlineAsm(CI)) {
+          BBI = BB.begin();
+          // Avoid processing instructions out of order, which could cause
+          // reuse before a value is defined.
+          SunkAddrs.clear();
+        } else
+          // Sink address computing for memory operands into the block.
+          MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
+      } else {
+        // Other CallInst optimizations that don't need to muck with the
+        // enclosing iterator here.
+        MadeChange |= OptimizeCallInst(CI);
+      }
    }
  }
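
The hunks above leave CodeGenPrepare as an ordinary FunctionPass that targets schedule immediately before instruction selection, created through the createCodeGenPreparePass hook shown in the diff. As orientation for readers, here is a minimal sketch of how a code generator of this era might add the pass; the helper name and the OptLevel gating are illustrative assumptions, not verbatim LLVMTargetMachine code:

    #include "llvm/PassManager.h"
    #include "llvm/Target/TargetMachine.h"   // CodeGenOpt::Level
    #include "llvm/Transforms/Scalar.h"      // declares createCodeGenPreparePass

    using namespace llvm;

    // Hypothetical helper: run CodeGenPrepare ahead of isel so that address
    // computations, extends, and fortified _chk calls reach selection in a
    // form the SelectionDAG builder can match within a single basic block.
    static void addIselPreparation(PassManagerBase &PM, TargetMachine &TM,
                                   CodeGenOpt::Level OptLevel) {
      if (OptLevel != CodeGenOpt::None)
        PM.add(createCodeGenPreparePass(TM.getTargetLowering()));
    }

Passing the TargetLowering pointer is what lets the pass consult hooks such as isTruncateFree, isLoadExtLegal, and ExpandInlineAsm in the hunks above; the pass also tolerates a null TLI and simply skips those target-dependent transformations.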