1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass munges the code in the input function to better prepare it for
11 // SelectionDAG-based code generation. This works around limitations in its
12 // basic-block-at-a-time approach. It should eventually be removed.
14 //===----------------------------------------------------------------------===//
16 #include "llvm/CodeGen/Passes.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/InstructionSimplify.h"
24 #include "llvm/Analysis/MemoryLocation.h"
25 #include "llvm/Analysis/TargetLibraryInfo.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/CallSite.h"
29 #include "llvm/IR/Constants.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/GetElementPtrTypeIterator.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/InlineAsm.h"
37 #include "llvm/IR/InstIterator.h"
38 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/IntrinsicInst.h"
41 #include "llvm/IR/MDBuilder.h"
42 #include "llvm/IR/NoFolder.h"
43 #include "llvm/IR/PatternMatch.h"
44 #include "llvm/IR/Statepoint.h"
45 #include "llvm/IR/ValueHandle.h"
46 #include "llvm/IR/ValueMap.h"
47 #include "llvm/Pass.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Debug.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include "llvm/Target/TargetLowering.h"
52 #include "llvm/Target/TargetSubtargetInfo.h"
53 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
54 #include "llvm/Transforms/Utils/BuildLibCalls.h"
55 #include "llvm/Transforms/Utils/BypassSlowDivision.h"
56 #include "llvm/Transforms/Utils/Local.h"
57 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
59 using namespace llvm::PatternMatch;
61 #define DEBUG_TYPE "codegenprepare"
63 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
64 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
65 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
66 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of sunken Cmps");
68 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses of sunken Casts");
70 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
71 "computations were sunk");
72 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
73 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
74 STATISTIC(NumAndsAdded,
75 "Number of and mask instructions added to form ext loads");
76 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
77 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
78 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
79 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
80 STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches");
81 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
83 static cl::opt<bool> DisableBranchOpts(
84 "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
85 cl::desc("Disable branch optimizations in CodeGenPrepare"));
88 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
89 cl::desc("Disable GC optimizations in CodeGenPrepare"));
91 static cl::opt<bool> DisableSelectToBranch(
92 "disable-cgp-select2branch", cl::Hidden, cl::init(false),
93 cl::desc("Disable select to branch conversion."));
95 static cl::opt<bool> AddrSinkUsingGEPs(
96 "addr-sink-using-gep", cl::Hidden, cl::init(false),
97 cl::desc("Address sinking in CGP using GEPs."));
99 static cl::opt<bool> EnableAndCmpSinking(
100 "enable-andcmp-sinking", cl::Hidden, cl::init(true),
101 cl::desc("Enable sinking and/cmp into branches."));
103 static cl::opt<bool> DisableStoreExtract(
104 "disable-cgp-store-extract", cl::Hidden, cl::init(false),
105 cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
107 static cl::opt<bool> StressStoreExtract(
108 "stress-cgp-store-extract", cl::Hidden, cl::init(false),
109 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
111 static cl::opt<bool> DisableExtLdPromotion(
112 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
113 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in CodeGenPrepare"));
116 static cl::opt<bool> StressExtLdPromotion(
117 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
118 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
119 "optimization in CodeGenPrepare"));
122 typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
123 typedef PointerIntPair<Type *, 1, bool> TypeIsSExt;
124 typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy;
125 class TypePromotionTransaction;
127 class CodeGenPrepare : public FunctionPass {
128 const TargetMachine *TM;
129 const TargetLowering *TLI;
130 const TargetTransformInfo *TTI;
131 const TargetLibraryInfo *TLInfo;
133 /// As we scan instructions optimizing them, this is the next instruction
134 /// to optimize. Transforms that can invalidate this should update it.
135 BasicBlock::iterator CurInstIterator;
137 /// Keeps track of non-local addresses that have been sunk into a block.
138 /// This allows us to avoid inserting duplicate code for blocks with
139 /// multiple load/stores of the same address.
140 ValueMap<Value*, Value*> SunkAddrs;
142 /// Keeps track of all instructions inserted for the current function.
143 SetOfInstrs InsertedInsts;
144 /// Keeps track of the original type of each promoted instruction for the
145 /// current function.
146 InstrToOrigTy PromotedInsts;
148 /// True if CFG is modified in any way.
151 /// True if optimizing for size.
154 /// DataLayout for the Function being processed.
155 const DataLayout *DL;
158 static char ID; // Pass identification, replacement for typeid
159 explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
160 : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr), DL(nullptr) {
161 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
163 bool runOnFunction(Function &F) override;
165 const char *getPassName() const override { return "CodeGen Prepare"; }
167 void getAnalysisUsage(AnalysisUsage &AU) const override {
168 AU.addPreserved<DominatorTreeWrapperPass>();
169 AU.addRequired<TargetLibraryInfoWrapperPass>();
170 AU.addRequired<TargetTransformInfoWrapperPass>();
174 bool eliminateFallThrough(Function &F);
175 bool eliminateMostlyEmptyBlocks(Function &F);
176 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
177 void eliminateMostlyEmptyBlock(BasicBlock *BB);
178 bool optimizeBlock(BasicBlock &BB, bool& ModifiedDT);
179 bool optimizeInst(Instruction *I, bool& ModifiedDT);
180 bool optimizeMemoryInst(Instruction *I, Value *Addr,
181 Type *AccessTy, unsigned AS);
182 bool optimizeInlineAsmInst(CallInst *CS);
183 bool optimizeCallInst(CallInst *CI, bool& ModifiedDT);
184 bool moveExtToFormExtLoad(Instruction *&I);
185 bool optimizeExtUses(Instruction *I);
186 bool optimizeLoadExt(LoadInst *I);
187 bool optimizeSelectInst(SelectInst *SI);
188 bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
189 bool optimizeSwitchInst(SwitchInst *CI);
190 bool optimizeExtractElementInst(Instruction *Inst);
191 bool dupRetToEnableTailCallOpts(BasicBlock *BB);
192 bool placeDbgValues(Function &F);
193 bool sinkAndCmp(Function &F);
194 bool extLdPromotion(TypePromotionTransaction &TPT, LoadInst *&LI,
196 const SmallVectorImpl<Instruction *> &Exts,
197 unsigned CreatedInstCost);
198 bool splitBranchCondition(Function &F);
199 bool simplifyOffsetableRelocate(Instruction &I);
200 void stripInvariantGroupMetadata(Instruction &I);
204 char CodeGenPrepare::ID = 0;
205 INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare",
206 "Optimize for code generation", false, false)
208 FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
209 return new CodeGenPrepare(TM);
214 bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal);
215 Value* GetUntaintedAddress(Value* CurrentAddress);
217 // The depth we trace down a variable to look for its dependence set.
218 const unsigned kDependenceDepth = 4;
220 // Recursively looks for variables that 'Val' depends on, up to the given depth
221 // 'Depth', and adds them to 'DepSet'. If 'InsertOnlyLeafNodes' is true, only
222 // inserts the leaf node values; otherwise, all visited nodes are included in
223 // 'DepSet'. Note that constants will be ignored.
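// For illustration (a sketch, not taken from real input): given
//   %a = add i32 %x, %y
//   %b = select i1 %c, i32 %a, i32 7
// calling recursivelyFindDependence(&DepSet, %b, /*InsertOnlyLeafNodes=*/true)
// collects the leaf values {%c, %x, %y} and ignores the constant 7; with
// InsertOnlyLeafNodes == false, the intermediate values %b and %a are added
// as well.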
224 template <typename SetType>
225 void recursivelyFindDependence(SetType* DepSet, Value* Val,
226 bool InsertOnlyLeafNodes = false,
227 unsigned Depth = kDependenceDepth) {
228 if (Val == nullptr) {
231 if (!InsertOnlyLeafNodes && !isa<Constant>(Val)) {
235 // Cannot go deeper. Insert the leaf nodes.
236 if (InsertOnlyLeafNodes && !isa<Constant>(Val)) {
242 // Go one step further to explore the dependence of the operands.
243 Instruction* I = nullptr;
244 if ((I = dyn_cast<Instruction>(Val))) {
245 if (isa<LoadInst>(I)) {
246 // A load is considered a leaf load of the dependence tree. Done.
249 } else if (I->isBinaryOp()) {
250 BinaryOperator* I = dyn_cast<BinaryOperator>(Val);
251 Value *Op0 = I->getOperand(0), *Op1 = I->getOperand(1);
252 recursivelyFindDependence(DepSet, Op0, Depth - 1);
253 recursivelyFindDependence(DepSet, Op1, Depth - 1);
254 } else if (I->isCast()) {
255 Value* Op0 = I->getOperand(0);
256 recursivelyFindDependence(DepSet, Op0, Depth - 1);
257 } else if (I->getOpcode() == Instruction::Select) {
258 Value* Op0 = I->getOperand(0);
259 Value* Op1 = I->getOperand(1);
260 Value* Op2 = I->getOperand(2);
261 recursivelyFindDependence(DepSet, Op0, Depth - 1);
262 recursivelyFindDependence(DepSet, Op1, Depth - 1);
263 recursivelyFindDependence(DepSet, Op2, Depth - 1);
264 } else if (I->getOpcode() == Instruction::GetElementPtr) {
265 for (unsigned i = 0; i < I->getNumOperands(); i++) {
266 recursivelyFindDependence(DepSet, I->getOperand(i), Depth - 1);
268 } else if (I->getOpcode() == Instruction::Store) {
269 auto* SI = dyn_cast<StoreInst>(Val);
270 recursivelyFindDependence(DepSet, SI->getPointerOperand(), Depth - 1);
271 recursivelyFindDependence(DepSet, SI->getValueOperand(), Depth - 1);
273 Value* Op0 = nullptr;
274 Value* Op1 = nullptr;
275 switch (I->getOpcode()) {
276 case Instruction::ICmp:
277 case Instruction::FCmp: {
278 Op0 = I->getOperand(0);
279 Op1 = I->getOperand(1);
280 recursivelyFindDependence(DepSet, Op0, Depth - 1);
281 recursivelyFindDependence(DepSet, Op1, Depth - 1);
285 // Be conservative. Add it and be done with it.
291 } else if (isa<Constant>(Val)) {
292 // Not interested in constant values. Done.
295 // Be conservative. Add it and be done with it.
301 // Helper function to create a Cast instruction.
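// For example: an integer dependence value (e.g. an i1 condition) is
// sign-extended, a float or double is converted with fptosi, a pointer is
// converted with ptrtoint, and anything else falls back to a plain bitcast to
// 'TargetIntegerType'.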
302 Value* createCast(IRBuilder<true, NoFolder>& Builder, Value* DepVal,
303 Type* TargetIntegerType) {
304 Instruction::CastOps CastOp = Instruction::BitCast;
305 switch (DepVal->getType()->getTypeID()) {
306 case Type::IntegerTyID: {
307 CastOp = Instruction::SExt;
310 case Type::FloatTyID:
311 case Type::DoubleTyID: {
312 CastOp = Instruction::FPToSI;
315 case Type::PointerTyID: {
316 CastOp = Instruction::PtrToInt;
322 return Builder.CreateCast(CastOp, DepVal, TargetIntegerType);
325 // Given a value, if it's a tainted address, this function returns the
326 // instruction that ORs the "dependence value" with the "original address".
327 // Otherwise, returns nullptr. This instruction is the first OR instruction
328 // where one of its operands is an AND instruction with an operand being 0.
330 // E.g., it returns '%4 = or i32 %3, %2' given 'CurrentAddress' is '%5'.
331 // %0 = load i32, i32* @y, align 4, !tbaa !1
332 // %cmp = icmp ne i32 %0, 42 // <== this is like the condition
333 // %1 = sext i1 %cmp to i32
334 // %2 = ptrtoint i32* @x to i32
335 // %3 = and i32 %1, 0
336 // %4 = or i32 %3, %2
337 // %5 = inttoptr i32 %4 to i32*
338 // store i32 1, i32* %5, align 4
339 Instruction* getOrAddress(Value* CurrentAddress) {
340 // Is it a cast from integer to pointer type.
341 Instruction* OrAddress = nullptr;
342 Instruction* AndDep = nullptr;
343 Instruction* CastToInt = nullptr;
344 Value* ActualAddress = nullptr;
345 Constant* ZeroConst = nullptr;
347 const Instruction* CastToPtr = dyn_cast<Instruction>(CurrentAddress);
348 if (CastToPtr && CastToPtr->getOpcode() == Instruction::IntToPtr) {
349 // Is it an OR instruction: %1 = or %and, %actualAddress.
350 if ((OrAddress = dyn_cast<Instruction>(CastToPtr->getOperand(0))) &&
351 OrAddress->getOpcode() == Instruction::Or) {
352 // The first operand should be an AND instruction.
353 AndDep = dyn_cast<Instruction>(OrAddress->getOperand(0));
354 if (AndDep && AndDep->getOpcode() == Instruction::And) {
355 // Also make sure the second operand of the "AND" is 0, or the "AND" is
356 // marked explicitly by "NoInstCombine".
357 if ((ZeroConst = dyn_cast<Constant>(AndDep->getOperand(1))) &&
358 ZeroConst->isNullValue()) {
364 // Looks like it has not been tainted.
368 // Given a value, if it's a tainted address, this function returns the
369 // instruction that taints the "dependence value". Otherwise, returns nullptr.
370 // This instruction is the last AND instruction where one of its operands is 0.
371 // E.g., it returns '%3' given 'CurrentAddress' is '%5'.
372 // %0 = load i32, i32* @y, align 4, !tbaa !1
373 // %cmp = icmp ne i32 %0, 42 // <== this is like the condition
374 // %1 = sext i1 %cmp to i32
375 // %2 = ptrtoint i32* @x to i32
376 // %3 = and i32 %1, 0
377 // %4 = or i32 %3, %2
378 // %5 = inttoptr i32 %4 to i32*
379 // store i32 1, i32* %5, align 4
380 Instruction* getAndDependence(Value* CurrentAddress) {
381 // If 'CurrentAddress' is tainted, get the OR instruction.
382 auto* OrAddress = getOrAddress(CurrentAddress);
383 if (OrAddress == nullptr) {
387 // No need to check the operands.
388 auto* AndDepInst = dyn_cast<Instruction>(OrAddress->getOperand(0));
393 // Given a value, if it's a tainted address, this function returns
394 // the "dependence value", which is the first operand in the AND instruction.
395 // E.g., it returns '%1' given 'CurrentAddress' is '%5'.
396 // %0 = load i32, i32* @y, align 4, !tbaa !1
397 // %cmp = icmp ne i32 %0, 42 // <== this is like the condition
398 // %1 = sext i1 %cmp to i32
399 // %2 = ptrtoint i32* @x to i32
400 // %3 = and i32 %1, 0
401 // %4 = or i32 %3, %2
402 // %5 = inttoptr i32 %4 to i32*
403 // store i32 1, i32* %5, align 4
404 Value* getDependence(Value* CurrentAddress) {
405 auto* AndInst = getAndDependence(CurrentAddress);
406 if (AndInst == nullptr) {
409 return AndInst->getOperand(0);
412 // Given an address that has been tainted, returns the only condition it depends
413 // on, if any; otherwise, returns nullptr.
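// For the example pattern above, getConditionDependence(%5) would walk from
// the dependence value %1 (the sext) back to the icmp and return %cmp (a
// sketch of the intended behaviour; it returns nullptr if no single condition
// is found).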
414 Value* getConditionDependence(Value* Address) {
415 auto* Dep = getDependence(Address);
416 if (Dep == nullptr) {
417 // 'Address' has not been dependence-tainted.
421 Value* Operand = Dep;
423 auto* Inst = dyn_cast<Instruction>(Operand);
424 if (Inst == nullptr) {
425 // Non-instruction type does not have condition dependence.
428 if (Inst->getOpcode() == Instruction::ICmp) {
431 if (Inst->getNumOperands() != 1) {
434 Operand = Inst->getOperand(0);
440 // Conservatively decides whether the dependence set of 'Val1' includes the
441 // dependence set of 'Val2'. If 'ExpandSecondValue' is false, we do not expand
442 // 'Val2' and use that single value as its dependence set.
443 // If it returns true, it means the dependence set of 'Val1' includes that of
444 // 'Val2'; otherwise, it only means we cannot conclusively decide it.
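// A sketch of the intended use: with
//   %a = add i32 %x, %y
// dependenceSetInclusion(%a, %x) returns true, because every leaf that %x
// expands to is already contained in the (deeper) dependence set computed for
// %a; a false result only means the inclusion could not be proven.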
445 bool dependenceSetInclusion(Value* Val1, Value* Val2,
446 int Val1ExpandLevel = 2 * kDependenceDepth,
447 int Val2ExpandLevel = kDependenceDepth) {
448 typedef SmallSet<Value*, 8> IncludingSet;
449 typedef SmallSet<Value*, 4> IncludedSet;
451 IncludingSet DepSet1;
453 // Explore to a greater depth for the including set.
454 recursivelyFindDependence(&DepSet1, Val1, false /*Insert all visited nodes*/,
456 recursivelyFindDependence(&DepSet2, Val2, true /*Only insert leaf nodes*/,
459 auto set_inclusion = [](IncludingSet FullSet, IncludedSet Subset) {
460 for (auto* Dep : Subset) {
461 if (0 == FullSet.count(Dep)) {
467 bool inclusion = set_inclusion(DepSet1, DepSet2);
468 DEBUG(dbgs() << "[dependenceSetInclusion]: " << inclusion << "\n");
469 DEBUG(dbgs() << "Including set for: " << *Val1 << "\n");
470 DEBUG(for (const auto* Dep : DepSet1) { dbgs() << "\t\t" << *Dep << "\n"; });
471 DEBUG(dbgs() << "Included set for: " << *Val2 << "\n");
472 DEBUG(for (const auto* Dep : DepSet2) { dbgs() << "\t\t" << *Dep << "\n"; });
477 // Recursively iterates through the operands spawned from 'DepVal'. If there
478 // exists a single value that 'DepVal' only depends on, we call that value the
479 // root dependence of 'DepVal' and return it. Otherwise, return 'DepVal'.
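// For example, for
//   %p = getelementptr inbounds %struct, %struct* %base, i64 0, i32 123
// all non-constant leaves collapse to %base, so getRootDependence(%p) returns
// %base; if the value depends on more than one leaf, 'DepVal' itself is
// returned unchanged.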
480 Value* getRootDependence(Value* DepVal) {
481 SmallSet<Value*, 8> DepSet;
482 for (unsigned depth = kDependenceDepth; depth > 0; --depth) {
483 recursivelyFindDependence(&DepSet, DepVal, true /*Only insert leaf nodes*/,
485 if (DepSet.size() == 1) {
486 return *DepSet.begin();
493 // This function actually taints the address of 'SI' with 'DepVal'. If the address
495 // of 'SI' already depends on whatever 'DepVal' depends on, this function
496 // doesn't do anything and returns false. Otherwise, returns true.
498 // This effect forces the store and any stores that come later to depend on
499 // 'DepVal'. For example, we have a condition "cond", and a store instruction
500 // "s: STORE addr, val". If we want "s" (and any later store) to depend on
501 // "cond", we do the following:
502 // %conv = sext i1 %cond to i32
503 // %addrVal = ptrtoint i32* %addr to i32
504 // %andCond = and i32 conv, 0;
505 // %orAddr = or i32 %andCond, %addrVal;
506 // %NewAddr = inttoptr i32 %orAddr to i32*;
508 // This is a more concrete example:
510 // %0 = load i32, i32* @y, align 4, !tbaa !1
511 // %cmp = icmp ne i32 %0, 42 // <== this is like the condition
512 // %1 = sext i1 %cmp to i32
513 // %2 = ptrtoint i32* @x to i32
514 // %3 = and i32 %1, 0
515 // %4 = or i32 %3, %2
516 // %5 = inttoptr i32 %4 to i32*
517 // store i32 1, i32* %5, align 4
518 bool taintStoreAddress(StoreInst* SI, Value* DepVal,
519 const char* calling_func = __builtin_FUNCTION()) {
520 DEBUG(dbgs() << "Called from " << calling_func << '\n');
521 IRBuilder<true, NoFolder> Builder(SI);
522 BasicBlock* BB = SI->getParent();
523 Value* Address = SI->getPointerOperand();
524 Type* TargetIntegerType =
525 IntegerType::get(Address->getContext(),
526 BB->getModule()->getDataLayout().getPointerSizeInBits());
528 // Does SI's address already depend on whatever 'DepVal' depends on?
529 if (StoreAddressDependOnValue(SI, DepVal)) {
533 // Figure out if there's a root variable 'DepVal' depends on. For example, we
534 // can extract "getelementptr inbounds %struct, %struct* %0, i64 0, i32 123"
535 // to be "%struct* %0" since all other operands are constant.
536 DepVal = getRootDependence(DepVal);
538 // Is this already a dependence-tainted store?
539 Value* OldDep = getDependence(Address);
541 // The address of 'SI' has already been tainted. Just need to absorb the
542 // DepVal to the existing dependence in the address of SI.
543 Instruction* AndDep = getAndDependence(Address);
544 IRBuilder<true, NoFolder> Builder(AndDep);
545 Value* NewDep = nullptr;
546 if (DepVal->getType() == AndDep->getType()) {
547 NewDep = Builder.CreateAnd(OldDep, DepVal);
549 NewDep = Builder.CreateAnd(
550 OldDep, createCast(Builder, DepVal, TargetIntegerType));
553 auto* NewDepInst = dyn_cast<Instruction>(NewDep);
555 // Use the new AND instruction as the dependence
556 AndDep->setOperand(0, NewDep);
560 // SI's address has not been tainted. Now taint it with 'DepVal'.
561 Value* CastDepToInt = createCast(Builder, DepVal, TargetIntegerType);
562 Value* PtrToIntCast = Builder.CreatePtrToInt(Address, TargetIntegerType);
564 Builder.CreateAnd(CastDepToInt, ConstantInt::get(TargetIntegerType, 0));
565 auto AndInst = dyn_cast<Instruction>(AndDepVal);
566 // XXX-comment: The original IR InstCombiner would change our 'and' instruction
567 // to a select, and then the back end would optimize the condition out. We attach a
568 // flag to instructions and set it here to inform the InstCombiner not to
569 // touch this 'and' instruction at all.
570 Value* OrAddr = Builder.CreateOr(AndDepVal, PtrToIntCast);
571 Value* NewAddr = Builder.CreateIntToPtr(OrAddr, Address->getType());
573 DEBUG(dbgs() << "[taintStoreAddress]\n"
574 << "Original store: " << *SI << '\n');
575 SI->setOperand(1, NewAddr);
578 DEBUG(dbgs() << "\tTargetIntegerType: " << *TargetIntegerType << '\n'
579 << "\tCast dependence value to integer: " << *CastDepToInt
581 << "\tCast address to integer: " << *PtrToIntCast << '\n'
582 << "\tAnd dependence value: " << *AndDepVal << '\n'
583 << "\tOr address: " << *OrAddr << '\n'
584 << "\tCast or instruction to address: " << *NewAddr << "\n\n");
589 // Looks for the previous store in the if block --- 'BrBB', which makes the
590 // speculative store 'StoreToHoist' safe.
591 Value* getSpeculativeStoreInPrevBB(StoreInst* StoreToHoist, BasicBlock* BrBB) {
592 assert(StoreToHoist && "StoreToHoist must be a real store");
594 Value* StorePtr = StoreToHoist->getPointerOperand();
596 // Look for a store to the same pointer in BrBB.
597 for (BasicBlock::reverse_iterator RI = BrBB->rbegin(), RE = BrBB->rend();
599 Instruction* CurI = &*RI;
601 StoreInst* SI = dyn_cast<StoreInst>(CurI);
602 // Found the previous store; make sure it stores to the same location.
603 // XXX-update: If the previous store's original untainted address is the
604 // same as 'StorePtr', we are also good to hoist the store.
605 if (SI && (SI->getPointerOperand() == StorePtr ||
606 GetUntaintedAddress(SI->getPointerOperand()) == StorePtr)) {
607 // Found the previous store, return its value operand.
613 "We should not reach here since this store is safe to speculate");
616 // XXX-comment: Returns true if it changes the code, false otherwise (the branch
617 // condition already depends on 'DepVal').
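// A sketch of the rewrite performed below, assuming an i32 'DepVal' and an i1
// branch condition %cond (register names hypothetical):
//   %and.dep   = and i32 %DepVal, 0
//   %trunc.dep = trunc i32 %and.dep to i1
//   %or.cond   = or i1 %trunc.dep, %cond
//   br i1 %or.cond, ...
// The extra instructions do not change the branch outcome but make the
// condition data-dependent on 'DepVal'.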
618 bool taintConditionalBranch(BranchInst* BI, Value* DepVal) {
619 assert(BI->isConditional());
620 auto* Cond = BI->getOperand(0);
621 if (dependenceSetInclusion(Cond, DepVal)) {
622 // The dependence/ordering is self-evident.
626 IRBuilder<true, NoFolder> Builder(BI);
628 Builder.CreateAnd(DepVal, ConstantInt::get(DepVal->getType(), 0));
630 Builder.CreateTrunc(AndDep, IntegerType::get(DepVal->getContext(), 1));
631 auto* OrCond = Builder.CreateOr(TruncAndDep, Cond);
632 BI->setOperand(0, OrCond);
635 DEBUG(dbgs() << "\tTainted branch condition:\n" << *BI->getParent());
640 bool ConditionalBranchDependsOnValue(BranchInst* BI, Value* DepVal) {
641 assert(BI->isConditional());
642 auto* Cond = BI->getOperand(0);
643 return dependenceSetInclusion(Cond, DepVal);
646 // XXX-update: For a relaxed load 'LI', find the first immediate atomic store or
647 // the first conditional branch. Returns nullptr if there is no such immediately
648 // following store/branch instruction, in which case we can only enforce the load via its own ordering (e.g., by making it acquire).
650 Instruction* findFirstStoreCondBranchInst(LoadInst* LI) {
651 // In some situations, relaxed loads can be left as is:
652 // 1. The relaxed load is used to calculate the address of the immediately following store;
654 // 2. The relaxed load is used as a condition in the immediately following
655 // conditional branch, and there are no stores in between. This is actually quite
657 // int r1 = x.load(relaxed);
659 // y.store(1, relaxed);
661 // However, in this function, we don't deal with them directly. Instead, we
662 // just find the immediately following store/conditional branch and return it.
664 auto* BB = LI->getParent();
666 auto BBI = BasicBlock::iterator(LI);
669 for (; BBI != BE; BBI++) {
670 auto* Inst = dyn_cast<Instruction>(&*BBI);
671 if (Inst == nullptr) {
674 if (Inst->getOpcode() == Instruction::Store) {
676 } else if (Inst->getOpcode() == Instruction::Br) {
677 auto* BrInst = dyn_cast<BranchInst>(Inst);
678 if (BrInst->isConditional()) {
681 // Reinitialize iterators with the destination of the unconditional branch.
683 BB = BrInst->getSuccessor(0);
696 // XXX-comment: Returns whether the code has been changed.
697 bool taintMonotonicLoads(const SmallVector<LoadInst*, 1>& MonotonicLoadInsts) {
698 bool Changed = false;
699 for (auto* LI : MonotonicLoadInsts) {
700 auto* FirstInst = findFirstStoreCondBranchInst(LI);
701 if (FirstInst == nullptr) {
702 // We don't seem to be able to taint a following store/conditional branch
703 // instruction. Simply make it acquire.
704 DEBUG(dbgs() << "[RelaxedLoad]: Transformed to acquire load\n"
706 LI->setOrdering(Acquire);
710 // Taint 'FirstInst', which could be a store or a conditional branch instruction.
712 if (FirstInst->getOpcode() == Instruction::Store) {
713 Changed |= taintStoreAddress(dyn_cast<StoreInst>(FirstInst), LI);
714 } else if (FirstInst->getOpcode() == Instruction::Br) {
715 Changed |= taintConditionalBranch(dyn_cast<BranchInst>(FirstInst), LI);
717 assert(false && "findFirstStoreCondBranchInst() should return a "
718 "store/condition branch instruction");
724 // Inserts a fake conditional branch right after the instruction 'SplitInst',
725 // and the branch condition is 'Condition'. 'SplitInst' will be placed in the
726 // newly created block.
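// A sketch of the resulting CFG (block names hypothetical): the block
// containing 'SplitInst' is split right before it, a conditional branch on
// 'Condition' to the new blocks "<BB>Then.Fake" and "<BB>Else.Fake" is
// inserted, both new blocks are marked non-eliminable, and both fall through
// to the tail block that now starts with 'SplitInst'.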
727 void AddFakeConditionalBranch(Instruction* SplitInst, Value* Condition) {
728 auto* BB = SplitInst->getParent();
729 TerminatorInst* ThenTerm = nullptr;
730 TerminatorInst* ElseTerm = nullptr;
731 SplitBlockAndInsertIfThenElse(Condition, SplitInst, &ThenTerm, &ElseTerm);
732 auto* ThenBB = ThenTerm->getParent();
733 auto* ElseBB = ElseTerm->getParent();
734 ThenBB->disableCanEliminateBlock();
735 ElseBB->disableCanEliminateBlock();
736 ThenBB->setName(BB->getName() + "Then.Fake");
737 ElseBB->setName(BB->getName() + "Else.Fake");
738 DEBUG(dbgs() << "Add fake conditional branch:\n"
740 << *ThenBB << "Else Block:\n"
744 // Inserts a fake conditional branch right after the relaxed load 'LI', using a comparison of the loaded value against zero as the (fake) condition.
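// A sketch of the IR inserted after a relaxed i32 load %r (names hypothetical):
//   %fake.cmp = icmp eq i32 %r, 0
//   br i1 %fake.cmp, label %<BB>Then.Fake, label %<BB>Else.Fake
// The comparison has no other use; it exists only to create a control
// dependence on the loaded value.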
745 void TaintRelaxedLoads(LoadInst* LI) {
746 IRBuilder<true, NoFolder> Builder(LI->getNextNode());
747 auto* FakeCondition = dyn_cast<Instruction>(Builder.CreateICmp(
748 CmpInst::ICMP_EQ, LI, Constant::getNullValue(LI->getType())));
749 AddFakeConditionalBranch(FakeCondition->getNextNode(), FakeCondition);
752 // XXX-comment: Returns whether the code has been changed.
753 bool AddsFakeConditionalBranchAfterMonotonicLoads(
754 const SmallVector<LoadInst*, 1>& MonotonicLoadInsts) {
755 bool Changed = false;
756 for (auto* LI : MonotonicLoadInsts) {
757 auto* FirstInst = findFirstStoreCondBranchInst(LI);
758 if (FirstInst != nullptr) {
759 if (FirstInst->getOpcode() == Instruction::Store) {
760 if (StoreAddressDependOnValue(dyn_cast<StoreInst>(FirstInst), LI)) {
763 } else if (FirstInst->getOpcode() == Instruction::Br) {
764 if (ConditionalBranchDependsOnValue(dyn_cast<BranchInst>(FirstInst),
769 dbgs() << "FirstInst=" << *FirstInst << "\n";
770 assert(false && "findFirstStoreCondBranchInst() should return a "
771 "store/condition branch instruction");
775 // We really need to process the relaxed load now.
776 TaintRelaxedLoads(LI);
782 /**** Implementations of public methods for dependence tainting ****/
783 Value* GetUntaintedAddress(Value* CurrentAddress) {
784 auto* OrAddress = getOrAddress(CurrentAddress);
785 if (OrAddress == nullptr) {
786 // Is it tainted by a select instruction?
787 auto* Inst = dyn_cast<Instruction>(CurrentAddress);
788 if (nullptr != Inst && Inst->getOpcode() == Instruction::Select) {
789 // A selection instruction.
790 if (Inst->getOperand(1) == Inst->getOperand(2)) {
791 return Inst->getOperand(1);
795 return CurrentAddress;
797 Value* ActualAddress = nullptr;
799 auto* CastToInt = dyn_cast<Instruction>(OrAddress->getOperand(1));
800 if (CastToInt && CastToInt->getOpcode() == Instruction::PtrToInt) {
801 return CastToInt->getOperand(0);
803 // This should be a PtrToInt constant expression.
804 ConstantExpr* PtrToIntExpr =
805 dyn_cast<ConstantExpr>(OrAddress->getOperand(1));
806 if (PtrToIntExpr && PtrToIntExpr->getOpcode() == Instruction::PtrToInt) {
807 return PtrToIntExpr->getOperand(0);
811 // Looks like it has not been dependence-tainted. Return the address itself.
812 return CurrentAddress;
815 MemoryLocation GetUntaintedMemoryLocation(StoreInst* SI) {
817 SI->getAAMetadata(AATags);
818 const auto& DL = SI->getModule()->getDataLayout();
819 const auto* OriginalAddr = GetUntaintedAddress(SI->getPointerOperand());
820 DEBUG(if (OriginalAddr != SI->getPointerOperand()) {
821 dbgs() << "[GetUntaintedMemoryLocation]\n"
822 << "Storing address: " << *SI->getPointerOperand()
823 << "\nUntainted address: " << *OriginalAddr << "\n";
825 return MemoryLocation(OriginalAddr,
826 DL.getTypeStoreSize(SI->getValueOperand()->getType()),
830 bool TaintDependenceToStore(StoreInst* SI, Value* DepVal) {
831 if (dependenceSetInclusion(SI, DepVal)) {
835 bool tainted = taintStoreAddress(SI, DepVal);
840 bool TaintDependenceToStoreAddress(StoreInst* SI, Value* DepVal) {
841 if (dependenceSetInclusion(SI->getPointerOperand(), DepVal)) {
845 bool tainted = taintStoreAddress(SI, DepVal);
850 bool CompressTaintedStore(BasicBlock* BB) {
851 // This function looks for windows of adjacent stores in 'BB' that satisfy the
852 // following condition (and then do optimization):
853 // *Addr(d1) = v1, d1 is a condition and is the only dependence the store's
854 // address depends on && Dep(v1) includes Dep(d1);
855 // *Addr(d2) = v2, d2 is a condition and is the only dependence the store's
856 // address depends on && Dep(v2) includes Dep(d2) &&
857 // Dep(d2) includes Dep(d1);
859 // *Addr(dN) = vN, dN is a condition and is the only dependence the store's
860 // address depends on && Dep(dN) includes Dep(d"N-1").
862 // As a result, Dep(dN) includes [Dep(d1) V ... V Dep(d"N-1")], so we can
863 // safely transform the above to the following. In between these stores, we
864 // can omit untainted stores to the same address 'Addr' since they internally
865 // have dependence on the previous stores on the same address.
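// A sketch of the intended result, assuming the conditions above hold:
//   *Addr = v1
//   *Addr = v2
//   ...
//   *Addr(dN) = vN
// i.e. every store except the last one reverts to the untainted address
// 'Addr', and only the last store keeps the condition-tainted address.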
870 for (auto BI = BB->begin(), BE = BB->end(); BI != BE; BI++) {
871 // Look for the first store in such a window of adjacent stores.
872 auto* FirstSI = dyn_cast<StoreInst>(&*BI);
877 // The first store in the window must be tainted.
878 auto* UntaintedAddress = GetUntaintedAddress(FirstSI->getPointerOperand());
879 if (UntaintedAddress == FirstSI->getPointerOperand()) {
883 // The first store's address must directly depend on, and only depend on, a condition.
885 auto* FirstSIDepCond = getConditionDependence(FirstSI->getPointerOperand());
886 if (nullptr == FirstSIDepCond) {
890 // Dep(first store's storing value) includes Dep(tainted dependence).
891 if (!dependenceSetInclusion(FirstSI->getValueOperand(), FirstSIDepCond)) {
895 // Look for subsequent stores to the same address that satisfy the condition
896 // of "compressing the dependence".
897 SmallVector<StoreInst*, 8> AdajacentStores;
898 AdajacentStores.push_back(FirstSI);
899 auto BII = BasicBlock::iterator(FirstSI);
900 for (BII++; BII != BE; BII++) {
901 auto* CurrSI = dyn_cast<StoreInst>(&*BII);
903 if (BII->mayHaveSideEffects()) {
904 // Be conservative. Instructions with side effects are similar to stores.
911 auto* OrigAddress = GetUntaintedAddress(CurrSI->getPointerOperand());
912 auto* CurrSIDepCond = getConditionDependence(CurrSI->getPointerOperand());
913 // All other stores must satisfy either:
914 // A. 'CurrSI' is an untainted store to the same address, or
915 // B. the combination of the following 5 subconditions:
917 // 2. Untainted address is the same as the group's address;
918 // 3. The address is tainted with a sole value which is a condition;
919 // 4. The storing value depends on the condition in 3.
920 // 5. The condition in 3 depends on the previous store's dependence condition.
923 // Condition A. Should ignore this store directly.
924 if (OrigAddress == CurrSI->getPointerOperand() &&
925 OrigAddress == UntaintedAddress) {
928 // Check condition B.
929 Value* Cond = nullptr;
930 if (OrigAddress == CurrSI->getPointerOperand() ||
931 OrigAddress != UntaintedAddress || CurrSIDepCond == nullptr ||
932 !dependenceSetInclusion(CurrSI->getValueOperand(), CurrSIDepCond)) {
933 // Check condition 1, 2, 3 & 4.
937 // Check condition 5.
938 StoreInst* PrevSI = AdajacentStores[AdajacentStores.size() - 1];
939 auto* PrevSIDepCond = getConditionDependence(PrevSI->getPointerOperand());
940 assert(PrevSIDepCond &&
941 "Store in the group must already depend on a condtion");
942 if (!dependenceSetInclusion(CurrSIDepCond, PrevSIDepCond)) {
946 AdajacentStores.push_back(CurrSI);
949 if (AdajacentStores.size() == 1) {
950 // The outer loop should keep looking from the next store.
954 // Now we have such a group of tainted stores to the same address.
955 DEBUG(dbgs() << "[CompressTaintedStore]\n");
956 DEBUG(dbgs() << "Original BB\n");
957 DEBUG(dbgs() << *BB << '\n');
958 auto* LastSI = AdajacentStores[AdajacentStores.size() - 1];
959 for (unsigned i = 0; i < AdajacentStores.size() - 1; ++i) {
960 auto* SI = AdajacentStores[i];
962 // Use the original address for stores before the last one.
963 SI->setOperand(1, UntaintedAddress);
965 DEBUG(dbgs() << "Store address has been reverted to the untainted address: " << *SI << '\n';);
967 // XXX-comment: Try to make the last store use fewer registers.
968 // If LastSI's storing value is a select based on the condition with which
969 // its address is tainted, transform the tainted address to a select
970 // instruction, as follows:
971 // r1 = Select Cond ? A : B
976 // r1 = Select Cond ? A : B
977 // r2 = Select Cond ? Addr : Addr
979 // The idea is that both Select instructions depend on the same condition,
980 // so hopefully the backend can generate two cmov instructions for them (and
981 // this saves the number of registers needed).
982 auto* LastSIDep = getConditionDependence(LastSI->getPointerOperand());
983 auto* LastSIValue = dyn_cast<Instruction>(LastSI->getValueOperand());
984 if (LastSIValue && LastSIValue->getOpcode() == Instruction::Select &&
985 LastSIValue->getOperand(0) == LastSIDep) {
986 // XXX-comment: Maybe it's better for us to just leave it as an and/or
987 // dependence pattern.
989 IRBuilder<true, NoFolder> Builder(LastSI);
991 Builder.CreateSelect(LastSIDep, UntaintedAddress, UntaintedAddress);
992 LastSI->setOperand(1, Address);
993 DEBUG(dbgs() << "The last store becomes: " << *LastSI << "\n\n";);
1001 bool PassDependenceToStore(Value* OldAddress, StoreInst* NewStore) {
1002 Value* OldDep = getDependence(OldAddress);
1003 // Return false when there's no dependence to pass from the OldAddress.
1008 // No need to pass the dependence to NewStore's address if it already depends
1009 // on whatever 'OldAddress' depends on.
1010 if (StoreAddressDependOnValue(NewStore, OldDep)) {
1013 return taintStoreAddress(NewStore, OldAddress);
1016 SmallSet<Value*, 8> FindDependence(Value* Val) {
1017 SmallSet<Value*, 8> DepSet;
1018 recursivelyFindDependence(&DepSet, Val, true /*Only insert leaf nodes*/);
1022 bool StoreAddressDependOnValue(StoreInst* SI, Value* DepVal) {
1023 return dependenceSetInclusion(SI->getPointerOperand(), DepVal);
1026 bool StoreDependOnValue(StoreInst* SI, Value* Dep) {
1027 return dependenceSetInclusion(SI, Dep);
1034 bool CodeGenPrepare::runOnFunction(Function &F) {
1035 // XXX-comment: Delay dealing with relaxed loads in this function to avoid
1036 // further changes done by other passes (e.g., SimplifyCFG).
1038 // Collect all the relaxed loads.
1039 SmallVector<LoadInst*, 1> MonotonicLoadInsts;
1040 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
1041 if (I->isAtomic()) {
1042 switch (I->getOpcode()) {
1043 case Instruction::Load: {
1044 auto* LI = dyn_cast<LoadInst>(&*I);
1045 if (LI->getOrdering() == Monotonic) {
1046 MonotonicLoadInsts.push_back(LI);
1056 bool EverMadeChange =
1057 AddsFakeConditionalBranchAfterMonotonicLoads(MonotonicLoadInsts);
1059 if (skipOptnoneFunction(F))
1062 DL = &F.getParent()->getDataLayout();
1064 // Clear per function information.
1065 InsertedInsts.clear();
1066 PromotedInsts.clear();
1070 TLI = TM->getSubtargetImpl(F)->getTargetLowering();
1071 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1072 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1073 OptSize = F.optForSize();
1075 /// This optimization identifies DIV instructions that can be
1076 /// profitably bypassed and carried out with a shorter, faster divide.
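/// For example (a sketch): on a target whose getBypassSlowDivWidths() maps
/// 64 -> 32, a 64-bit division whose operands happen to fit in 32 bits is
/// rewritten to a cheaper 32-bit divide guarded by a runtime check on the
/// operands' high bits.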
1077 if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
1078 const DenseMap<unsigned int, unsigned int> &BypassWidths =
1079 TLI->getBypassSlowDivWidths();
1080 BasicBlock* BB = &*F.begin();
1081 while (BB != nullptr) {
1082 // bypassSlowDivision may create new BBs, but we don't want to reapply the
1083 // optimization to those blocks.
1084 BasicBlock* Next = BB->getNextNode();
1085 EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
1090 // Eliminate blocks that contain only PHI nodes and an
1091 // unconditional branch.
1092 EverMadeChange |= eliminateMostlyEmptyBlocks(F);
1094 // If llvm.dbg.value is far away from the value, then iSel may not be able to
1095 // handle it properly. iSel will drop llvm.dbg.value if it cannot
1096 // find a node corresponding to the value.
1097 EverMadeChange |= placeDbgValues(F);
1099 // If there is a mask, compare against zero, and branch that can be combined
1100 // into a single target instruction, push the mask and compare into branch
1101 // users. Do this before OptimizeBlock -> OptimizeInst ->
1102 // OptimizeCmpExpression, which perturbs the pattern being searched for.
1103 if (!DisableBranchOpts) {
1104 EverMadeChange |= sinkAndCmp(F);
1105 EverMadeChange |= splitBranchCondition(F);
1108 bool MadeChange = true;
1109 while (MadeChange) {
1111 for (Function::iterator I = F.begin(); I != F.end(); ) {
1112 BasicBlock *BB = &*I++;
1113 bool ModifiedDTOnIteration = false;
1114 MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);
1116 // Restart BB iteration if the dominator tree of the Function was changed
1117 if (ModifiedDTOnIteration)
1120 EverMadeChange |= MadeChange;
1125 if (!DisableBranchOpts) {
1127 SmallPtrSet<BasicBlock*, 8> WorkList;
1128 for (BasicBlock &BB : F) {
1129 SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
1130 MadeChange |= ConstantFoldTerminator(&BB, true);
1131 if (!MadeChange) continue;
1133 for (SmallVectorImpl<BasicBlock*>::iterator
1134 II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
1135 if (pred_begin(*II) == pred_end(*II))
1136 WorkList.insert(*II);
1139 // Delete the dead blocks and any of their dead successors.
1140 MadeChange |= !WorkList.empty();
1141 while (!WorkList.empty()) {
1142 BasicBlock *BB = *WorkList.begin();
1144 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
1146 DeleteDeadBlock(BB);
1148 for (SmallVectorImpl<BasicBlock*>::iterator
1149 II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
1150 if (pred_begin(*II) == pred_end(*II))
1151 WorkList.insert(*II);
1154 // Merge pairs of basic blocks with unconditional branches, connected by a single edge.
1156 if (EverMadeChange || MadeChange)
1157 MadeChange |= eliminateFallThrough(F);
1159 EverMadeChange |= MadeChange;
1162 if (!DisableGCOpts) {
1163 SmallVector<Instruction *, 2> Statepoints;
1164 for (BasicBlock &BB : F)
1165 for (Instruction &I : BB)
1166 if (isStatepoint(I))
1167 Statepoints.push_back(&I);
1168 for (auto &I : Statepoints)
1169 EverMadeChange |= simplifyOffsetableRelocate(*I);
1172 return EverMadeChange;
1175 /// Merge basic blocks which are connected by a single edge, where one of the
1176 /// basic blocks has a single successor pointing to the other basic block,
1177 /// which has a single predecessor.
1178 bool CodeGenPrepare::eliminateFallThrough(Function &F) {
1179 bool Changed = false;
1180 // Scan all of the blocks in the function, except for the entry block.
1181 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
1182 BasicBlock *BB = &*I++;
1183 // If the destination block has a single pred, then this is a trivial
1184 // edge, just collapse it.
1185 BasicBlock *SinglePred = BB->getSinglePredecessor();
1187 // Don't merge if BB's address is taken.
1188 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
1190 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
1191 if (Term && !Term->isConditional()) {
1193 DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
1194 // Remember if SinglePred was the entry block of the function.
1195 // If so, we will need to move BB back to the entry position.
1196 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
1197 MergeBasicBlockIntoOnlyPred(BB, nullptr);
1199 if (isEntry && BB != &BB->getParent()->getEntryBlock())
1200 BB->moveBefore(&BB->getParent()->getEntryBlock());
1202 // We have erased a block. Update the iterator.
1203 I = BB->getIterator();
1209 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
1210 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
1211 /// edges in ways that are non-optimal for isel. Start by eliminating these
1212 /// blocks so we can split them the way we want them.
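/// A "mostly empty" block looks like this (a sketch):
///   bb:
///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///     br label %dest
/// i.e. nothing but PHI nodes, optional debug intrinsics, and an
/// unconditional branch.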
1213 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
1214 bool MadeChange = false;
1215 // Note that this intentionally skips the entry block.
1216 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
1217 BasicBlock *BB = &*I++;
1218 // XXX-disabled: Do not eliminate the added fake basic block.
1219 if (!BB->getCanEliminateBlock()) {
1223 // If this block doesn't end with an uncond branch, ignore it.
1224 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
1225 if (!BI || !BI->isUnconditional())
1228 // If the instruction before the branch (skipping debug info) isn't a phi
1229 // node, then other stuff is happening here.
1230 BasicBlock::iterator BBI = BI->getIterator();
1231 if (BBI != BB->begin()) {
1233 while (isa<DbgInfoIntrinsic>(BBI)) {
1234 if (BBI == BB->begin())
1238 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
1242 // Do not break infinite loops.
1243 BasicBlock *DestBB = BI->getSuccessor(0);
1247 if (!canMergeBlocks(BB, DestBB))
1250 eliminateMostlyEmptyBlock(BB);
1256 /// Return true if we can merge BB into DestBB if there is a single
1257 /// unconditional branch between them, and BB contains no other non-phi instructions.
1259 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
1260 const BasicBlock *DestBB) const {
1261 // We only want to eliminate blocks whose phi nodes are used by phi nodes in
1262 // the successor. If there are more complex conditions (e.g. preheaders),
1263 // don't mess around with them.
1264 BasicBlock::const_iterator BBI = BB->begin();
1265 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
1266 for (const User *U : PN->users()) {
1267 const Instruction *UI = cast<Instruction>(U);
1268 if (UI->getParent() != DestBB || !isa<PHINode>(UI))
1270 // If User is inside DestBB block and it is a PHINode then check
1271 // incoming value. If incoming value is not from BB then this is
1272 // a complex condition (e.g. preheaders) we want to avoid here.
1273 if (UI->getParent() == DestBB) {
1274 if (const PHINode *UPN = dyn_cast<PHINode>(UI))
1275 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
1276 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
1277 if (Insn && Insn->getParent() == BB &&
1278 Insn->getParent() != UPN->getIncomingBlock(I))
1285 // If BB and DestBB contain any common predecessors, then the phi nodes in BB
1286 // and DestBB may have conflicting incoming values for the block. If so, we
1287 // can't merge the block.
1288 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
1289 if (!DestBBPN) return true; // no conflict.
1291 // Collect the preds of BB.
1292 SmallPtrSet<const BasicBlock*, 16> BBPreds;
1293 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1294 // It is faster to get preds from a PHI than with pred_iterator.
1295 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1296 BBPreds.insert(BBPN->getIncomingBlock(i));
1298 BBPreds.insert(pred_begin(BB), pred_end(BB));
1301 // Walk the preds of DestBB.
1302 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
1303 BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
1304 if (BBPreds.count(Pred)) { // Common predecessor?
1305 BBI = DestBB->begin();
1306 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
1307 const Value *V1 = PN->getIncomingValueForBlock(Pred);
1308 const Value *V2 = PN->getIncomingValueForBlock(BB);
1310 // If V2 is a phi node in BB, look up what the mapped value will be.
1311 if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
1312 if (V2PN->getParent() == BB)
1313 V2 = V2PN->getIncomingValueForBlock(Pred);
1315 // If there is a conflict, bail out.
1316 if (V1 != V2) return false;
1325 /// Eliminate a basic block that has only phi's and an unconditional branch in it.
1327 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
1328 BranchInst *BI = cast<BranchInst>(BB->getTerminator());
1329 BasicBlock *DestBB = BI->getSuccessor(0);
1331 DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
1333 // If the destination block has a single pred, then this is a trivial edge,
1334 // just collapse it.
1335 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
1336 if (SinglePred != DestBB) {
1337 // Remember if SinglePred was the entry block of the function. If so, we
1338 // will need to move BB back to the entry position.
1339 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
1340 MergeBasicBlockIntoOnlyPred(DestBB, nullptr);
1342 if (isEntry && BB != &BB->getParent()->getEntryBlock())
1343 BB->moveBefore(&BB->getParent()->getEntryBlock());
1345 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1350 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
1351 // to handle the new incoming edges it is about to have.
1353 for (BasicBlock::iterator BBI = DestBB->begin();
1354 (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
1355 // Remove the incoming value for BB, and remember it.
1356 Value *InVal = PN->removeIncomingValue(BB, false);
1358 // Two options: either the InVal is a phi node defined in BB or it is some
1359 // value that dominates BB.
1360 PHINode *InValPhi = dyn_cast<PHINode>(InVal);
1361 if (InValPhi && InValPhi->getParent() == BB) {
1362 // Add all of the input values of the input PHI as inputs of this phi.
1363 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
1364 PN->addIncoming(InValPhi->getIncomingValue(i),
1365 InValPhi->getIncomingBlock(i));
1367 // Otherwise, add one instance of the dominating value for each edge that
1368 // we will be adding.
1369 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1370 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1371 PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
1373 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
1374 PN->addIncoming(InVal, *PI);
1379 // The PHIs are now updated, change everything that refers to BB to use
1380 // DestBB and remove BB.
1381 BB->replaceAllUsesWith(DestBB);
1382 BB->eraseFromParent();
1385 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1388 // Computes a map of base pointer relocation instructions to corresponding
1389 // derived pointer relocation instructions given a vector of all relocate calls
1390 static void computeBaseDerivedRelocateMap(
1391 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1392 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
1394 // Collect information in two maps: one primarily for locating the base object
1395 // while filling the second map; the second map is the final structure holding
1396 // a mapping between Base and corresponding Derived relocate calls
1397 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1398 for (auto *ThisRelocate : AllRelocateCalls) {
1399 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1400 ThisRelocate->getDerivedPtrIndex());
1401 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1403 for (auto &Item : RelocateIdxMap) {
1404 std::pair<unsigned, unsigned> Key = Item.first;
1405 if (Key.first == Key.second)
1406 // Base relocation: nothing to insert
1409 GCRelocateInst *I = Item.second;
1410 auto BaseKey = std::make_pair(Key.first, Key.first);
1412 // We're iterating over RelocateIdxMap so we cannot modify it.
1413 auto MaybeBase = RelocateIdxMap.find(BaseKey);
1414 if (MaybeBase == RelocateIdxMap.end())
1415 // TODO: We might want to insert a new base object relocate and gep off
1416 // that, if there are enough derived object relocates.
1419 RelocateInstMap[MaybeBase->second].push_back(I);
1423 // Accepts a GEP and extracts the operands into a vector provided they're all
1424 // small integer constants
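// For example (a sketch): for
//   %d = getelementptr i8, i8 addrspace(1)* %base, i32 15
// the collected vector is {15}; a GEP with a non-constant index, or with a
// constant index larger than 20, is rejected.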
1425 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1426 SmallVectorImpl<Value *> &OffsetV) {
1427 for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1428 // Only accept small constant integer operands
1429 auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1430 if (!Op || Op->getZExtValue() > 20)
1434 for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1435 OffsetV.push_back(GEP->getOperand(i));
1439 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1440 // replace, computes a replacement, and applies it.
1442 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1443 const SmallVectorImpl<GCRelocateInst *> &Targets) {
1444 bool MadeChange = false;
1445 for (GCRelocateInst *ToReplace : Targets) {
1446 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1447 "Not relocating a derived object of the original base object");
1448 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1449 // A duplicate relocate call. TODO: coalesce duplicates.
1453 if (RelocatedBase->getParent() != ToReplace->getParent()) {
1454 // Base and derived relocates are in different basic blocks.
1455 // In this case transform is only valid when base dominates derived
1456 // relocate. However it would be too expensive to check dominance
1457 // for each such relocate, so we skip the whole transformation.
1461 Value *Base = ToReplace->getBasePtr();
1462 auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1463 if (!Derived || Derived->getPointerOperand() != Base)
1466 SmallVector<Value *, 2> OffsetV;
1467 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1470 // Create a Builder and replace the target callsite with a gep
1471 assert(RelocatedBase->getNextNode() && "Should always have one since it's not a terminator");
1473 // Insert after RelocatedBase
1474 IRBuilder<> Builder(RelocatedBase->getNextNode());
1475 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1477 // If gc_relocate does not match the actual type, cast it to the right type.
1478 // In theory, there must be a bitcast after gc_relocate if the type does not
1479 // match, and we should reuse it to get the derived pointer. But it could be
1483 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1488 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1492 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1493 // %cast = bitcast i8 addrspace(1)* %p1 in to i32 addrspace(1)*
1495 // In this case, we cannot find the bitcast anymore. So we insert a new bitcast
1496 // whether or not one is already there. In this way, we can handle all cases, and
1497 // the extra bitcast should be optimized away in later passes.
1498 Value *ActualRelocatedBase = RelocatedBase;
1499 if (RelocatedBase->getType() != Base->getType()) {
1500 ActualRelocatedBase =
1501 Builder.CreateBitCast(RelocatedBase, Base->getType());
1503 Value *Replacement = Builder.CreateGEP(
1504 Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
1505 Replacement->takeName(ToReplace);
1506 // If the newly generated derived pointer's type does not match the original derived
1507 // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
1508 Value *ActualReplacement = Replacement;
1509 if (Replacement->getType() != ToReplace->getType()) {
1511 Builder.CreateBitCast(Replacement, ToReplace->getType());
1513 ToReplace->replaceAllUsesWith(ActualReplacement);
1514 ToReplace->eraseFromParent();
1524 // %ptr = gep %base + 15
1525 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1526 // %base' = relocate(%tok, i32 4, i32 4)
1527 // %ptr' = relocate(%tok, i32 4, i32 5)
1528 // %val = load %ptr'
1533 // %ptr = gep %base + 15
1534 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1535 // %base' = gc.relocate(%tok, i32 4, i32 4)
1536 // %ptr' = gep %base' + 15
1537 // %val = load %ptr'
1538 bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
1539 bool MadeChange = false;
1540 SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1542 for (auto *U : I.users())
1543 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1544 // Collect all the relocate calls associated with a statepoint
1545 AllRelocateCalls.push_back(Relocate);
1547 // We need at least one base pointer relocation + one derived pointer
1548 // relocation to mangle
1549 if (AllRelocateCalls.size() < 2)
1552 // RelocateInstMap is a mapping from the base relocate instruction to the
1553 // corresponding derived relocate instructions
1554 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1555 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1556 if (RelocateInstMap.empty())
1559 for (auto &Item : RelocateInstMap)
1560 // Item.first is the RelocatedBase to offset against
1561 // Item.second is the vector of Targets to replace
1562 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1566 /// SinkCast - Sink the specified cast instruction into its user blocks
1567 static bool SinkCast(CastInst *CI) {
1568 BasicBlock *DefBB = CI->getParent();
1570 /// InsertedCasts - Only insert a cast in each block once.
1571 DenseMap<BasicBlock*, CastInst*> InsertedCasts;
1573 bool MadeChange = false;
1574 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1576 Use &TheUse = UI.getUse();
1577 Instruction *User = cast<Instruction>(*UI);
1579 // Figure out which BB this cast is used in. For PHI's this is the
1580 // appropriate predecessor block.
1581 BasicBlock *UserBB = User->getParent();
1582 if (PHINode *PN = dyn_cast<PHINode>(User)) {
1583 UserBB = PN->getIncomingBlock(TheUse);
1586 // Preincrement use iterator so we don't invalidate it.
1589 // If the block selected to receive the cast is an EH pad that does not
1590 // allow non-PHI instructions before the terminator, we can't sink the cast.
1592 if (UserBB->getTerminator()->isEHPad())
1595 // If this user is in the same block as the cast, don't change the cast.
1596 if (UserBB == DefBB) continue;
1598 // If we have already inserted a cast into this block, use it.
1599 CastInst *&InsertedCast = InsertedCasts[UserBB];
1601 if (!InsertedCast) {
1602 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1603 assert(InsertPt != UserBB->end());
1604 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1605 CI->getType(), "", &*InsertPt);
1608 // Replace a use of the cast with a use of the new cast.
1609 TheUse = InsertedCast;
1614 // If we removed all uses, nuke the cast.
1615 if (CI->use_empty()) {
1616 CI->eraseFromParent();
1623 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1624 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1625 /// reduce the number of virtual registers that must be created and coalesced.
1627 /// Return true if any changes are made.
1629 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1630 const DataLayout &DL) {
1631 // If this is a noop copy,
1632 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1633 EVT DstVT = TLI.getValueType(DL, CI->getType());
1635 // This is an fp<->int conversion?
1636 if (SrcVT.isInteger() != DstVT.isInteger())
// If this is an extension, it will be a zero or sign extension, which isn't a noop.
1641 if (SrcVT.bitsLT(DstVT)) return false;
// If these values will be promoted, find out what they will be promoted
// to. This helps us consider truncates on PPC as noop copies when they are.
1646 if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1647 TargetLowering::TypePromoteInteger)
1648 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1649 if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1650 TargetLowering::TypePromoteInteger)
1651 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1653 // If, after promotion, these are the same types, this is a noop copy.
1657 return SinkCast(CI);
1660 /// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if
1663 /// Return true if any changes were made.
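/// A rough sketch of the rewrite (illustrative IR, not from the original
/// comment):
///   %add = add i32 %a, %b
///   %cmp = icmp ult i32 %add, %a        ; classic unsigned-overflow check
/// becomes
///   %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add  = extractvalue { i32, i1 } %uadd, 0
///   %cmp  = extractvalue { i32, i1 } %uadd, 1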
1664 static bool CombineUAddWithOverflow(CmpInst *CI) {
1668 m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
1671 Type *Ty = AddI->getType();
1672 if (!isa<IntegerType>(Ty))
// We don't want to move around uses of condition values this late, so we
// check if it is legal to create the call to the intrinsic in the basic
// block containing the icmp:
1679 if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
// Someday m_UAddWithOverflow may get smarter, but this is a safe assumption for now:
1685 if (AddI->hasOneUse())
1686 assert(*AddI->user_begin() == CI && "expected!");
1689 Module *M = CI->getModule();
1690 Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);
1692 auto *InsertPt = AddI->hasOneUse() ? CI : AddI;
1694 auto *UAddWithOverflow =
1695 CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
1696 auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
1698 ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);
1700 CI->replaceAllUsesWith(Overflow);
1701 AddI->replaceAllUsesWith(UAdd);
1702 CI->eraseFromParent();
1703 AddI->eraseFromParent();
1707 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1708 /// registers that must be created and coalesced. This is a clear win except on
1709 /// targets with multiple condition code registers (PowerPC), where it might
1710 /// lose; some adjustment may be wanted there.
1712 /// Return true if any changes are made.
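/// For illustration (not part of the original comment): an icmp defined in one
/// block but branched on in several other blocks gets a private copy in each of
/// those blocks, so the flag-producing compare ends up next to the branch that
/// consumes it instead of being carried across block boundaries.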
1713 static bool SinkCmpExpression(CmpInst *CI) {
1714 BasicBlock *DefBB = CI->getParent();
1716 /// Only insert a cmp in each block once.
1717 DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
1719 bool MadeChange = false;
1720 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1722 Use &TheUse = UI.getUse();
1723 Instruction *User = cast<Instruction>(*UI);
1725 // Preincrement use iterator so we don't invalidate it.
1728 // Don't bother for PHI nodes.
1729 if (isa<PHINode>(User))
1732 // Figure out which BB this cmp is used in.
1733 BasicBlock *UserBB = User->getParent();
1735 // If this user is in the same block as the cmp, don't change the cmp.
1736 if (UserBB == DefBB) continue;
1738 // If we have already inserted a cmp into this block, use it.
1739 CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1742 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1743 assert(InsertPt != UserBB->end());
1745 CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
1746 CI->getOperand(0), CI->getOperand(1), "", &*InsertPt);
1749 // Replace a use of the cmp with a use of the new cmp.
1750 TheUse = InsertedCmp;
1755 // If we removed all uses, nuke the cmp.
1756 if (CI->use_empty()) {
1757 CI->eraseFromParent();
1764 static bool OptimizeCmpExpression(CmpInst *CI) {
1765 if (SinkCmpExpression(CI))
1768 if (CombineUAddWithOverflow(CI))
/// Check if the candidates could be combined with a shift instruction, which includes:
1776 /// 1. Truncate instruction
1777 /// 2. And instruction and the imm is a mask of the low bits:
1778 /// imm & (imm+1) == 0
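/// For example (illustrative values, not from the original comment):
///   %t = and i64 %shr, 255   ; 255 & 256 == 0, a mask of the low bits
/// is a candidate, while
///   %t = and i64 %shr, 240   ; 240 & 241 != 0
/// is not.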
1779 static bool isExtractBitsCandidateUse(Instruction *User) {
1780 if (!isa<TruncInst>(User)) {
1781 if (User->getOpcode() != Instruction::And ||
1782 !isa<ConstantInt>(User->getOperand(1)))
1785 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1787 if ((Cimm & (Cimm + 1)).getBoolValue())
1793 /// Sink both shift and truncate instruction to the use of truncate's BB.
1795 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
1796 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1797 const TargetLowering &TLI, const DataLayout &DL) {
1798 BasicBlock *UserBB = User->getParent();
1799 DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1800 TruncInst *TruncI = dyn_cast<TruncInst>(User);
1801 bool MadeChange = false;
1803 for (Value::user_iterator TruncUI = TruncI->user_begin(),
1804 TruncE = TruncI->user_end();
1805 TruncUI != TruncE;) {
1807 Use &TruncTheUse = TruncUI.getUse();
1808 Instruction *TruncUser = cast<Instruction>(*TruncUI);
1809 // Preincrement use iterator so we don't invalidate it.
1813 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1817 // If the use is actually a legal node, there will not be an
1818 // implicit truncate.
1819 // FIXME: always querying the result type is just an
1820 // approximation; some nodes' legality is determined by the
1821 // operand or other means. There's no good way to find out though.
1822 if (TLI.isOperationLegalOrCustom(
1823 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1826 // Don't bother for PHI nodes.
1827 if (isa<PHINode>(TruncUser))
1830 BasicBlock *TruncUserBB = TruncUser->getParent();
1832 if (UserBB == TruncUserBB)
1835 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1836 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1838 if (!InsertedShift && !InsertedTrunc) {
1839 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1840 assert(InsertPt != TruncUserBB->end());
1842 if (ShiftI->getOpcode() == Instruction::AShr)
1843 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1846 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1850 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1852 assert(TruncInsertPt != TruncUserBB->end());
1854 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
1855 TruncI->getType(), "", &*TruncInsertPt);
1859 TruncTheUse = InsertedTrunc;
/// Sink the shift *right* instruction into user blocks if the uses could
/// potentially be combined with this shift instruction and generate a
/// BitExtract instruction. It will only be applied if the architecture
/// supports the BitExtract instruction. Here is an example:
/// BB1:
///   %x.extract.shift = lshr i64 %arg1, 32
/// BB2:
///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
/// ==>
/// BB2:
///   %x.extract.shift.1 = lshr i64 %arg1, 32
///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
/// CodeGen will recognize the pattern in BB2 and generate a BitExtract
/// instruction.
1881 /// Return true if any changes are made.
1882 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
1883 const TargetLowering &TLI,
1884 const DataLayout &DL) {
1885 BasicBlock *DefBB = ShiftI->getParent();
1887 /// Only insert instructions in each block once.
1888 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
1890 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
1892 bool MadeChange = false;
1893 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
1895 Use &TheUse = UI.getUse();
1896 Instruction *User = cast<Instruction>(*UI);
1897 // Preincrement use iterator so we don't invalidate it.
1900 // Don't bother for PHI nodes.
1901 if (isa<PHINode>(User))
1904 if (!isExtractBitsCandidateUse(User))
1907 BasicBlock *UserBB = User->getParent();
1909 if (UserBB == DefBB) {
// If the shift and truncate instructions are in the same BB, the use of the
// truncate (TruncUse) may still introduce another truncate if it is not
// legal. In this case, we would like to sink both the shift and the truncate
// instruction to the BB of TruncUse. For example:
//   i64 shift.result = lshr i64 opnd, imm
//   trunc.result = trunc shift.result to i16
//   ----> In the block of TruncUse we will have an implicit truncate here if
//         the architecture does not have an i16 compare.
//   cmp i16 trunc.result, opnd2
1924 if (isa<TruncInst>(User) && shiftIsLegal
// If the type of the truncate is legal, no truncate will be
1926 // introduced in other basic blocks.
1928 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
1930 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
1934 // If we have already inserted a shift into this block, use it.
1935 BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
1937 if (!InsertedShift) {
1938 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1939 assert(InsertPt != UserBB->end());
1941 if (ShiftI->getOpcode() == Instruction::AShr)
1942 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1945 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1951 // Replace a use of the shift with a use of the new shift.
1952 TheUse = InsertedShift;
1955 // If we removed all uses, nuke the shift.
1956 if (ShiftI->use_empty())
1957 ShiftI->eraseFromParent();
1962 // Translate a masked load intrinsic like
1963 // <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align,
1964 // <16 x i1> %mask, <16 x i32> %passthru)
// to a chain of basic blocks, loading the elements one by one if
1966 // the appropriate mask bit is set
1968 // %1 = bitcast i8* %addr to i32*
1969 // %2 = extractelement <16 x i1> %mask, i32 0
1970 // %3 = icmp eq i1 %2, true
1971 // br i1 %3, label %cond.load, label %else
1973 //cond.load: ; preds = %0
1974 // %4 = getelementptr i32* %1, i32 0
1975 // %5 = load i32* %4
1976 // %6 = insertelement <16 x i32> undef, i32 %5, i32 0
1979 //else: ; preds = %0, %cond.load
1980 // %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ]
1981 // %7 = extractelement <16 x i1> %mask, i32 1
1982 // %8 = icmp eq i1 %7, true
1983 // br i1 %8, label %cond.load1, label %else2
1985 //cond.load1: ; preds = %else
1986 // %9 = getelementptr i32* %1, i32 1
1987 // %10 = load i32* %9
1988 // %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1
1991 //else2: ; preds = %else, %cond.load1
1992 // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
1993 // %12 = extractelement <16 x i1> %mask, i32 2
1994 // %13 = icmp eq i1 %12, true
1995 // br i1 %13, label %cond.load4, label %else5
1997 static void ScalarizeMaskedLoad(CallInst *CI) {
1998 Value *Ptr = CI->getArgOperand(0);
1999 Value *Alignment = CI->getArgOperand(1);
2000 Value *Mask = CI->getArgOperand(2);
2001 Value *Src0 = CI->getArgOperand(3);
2003 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
2004 VectorType *VecType = dyn_cast<VectorType>(CI->getType());
2005 assert(VecType && "Unexpected return type of masked load intrinsic");
2007 Type *EltTy = CI->getType()->getVectorElementType();
2009 IRBuilder<> Builder(CI->getContext());
2010 Instruction *InsertPt = CI;
2011 BasicBlock *IfBlock = CI->getParent();
2012 BasicBlock *CondBlock = nullptr;
2013 BasicBlock *PrevIfBlock = CI->getParent();
2015 Builder.SetInsertPoint(InsertPt);
2016 Builder.SetCurrentDebugLocation(CI->getDebugLoc());
2018 // Short-cut if the mask is all-true.
2019 bool IsAllOnesMask = isa<Constant>(Mask) &&
2020 cast<Constant>(Mask)->isAllOnesValue();
2022 if (IsAllOnesMask) {
2023 Value *NewI = Builder.CreateAlignedLoad(Ptr, AlignVal);
2024 CI->replaceAllUsesWith(NewI);
2025 CI->eraseFromParent();
2029 // Adjust alignment for the scalar instruction.
2030 AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
// Bitcast %addr from i8* to EltTy*
2033 EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
2034 Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
2035 unsigned VectorWidth = VecType->getNumElements();
2037 Value *UndefVal = UndefValue::get(VecType);
2039 // The result vector
2040 Value *VResult = UndefVal;
2042 if (isa<ConstantVector>(Mask)) {
2043 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
2044 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
2047 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
2048 LoadInst* Load = Builder.CreateAlignedLoad(Gep, AlignVal);
2049 VResult = Builder.CreateInsertElement(VResult, Load,
2050 Builder.getInt32(Idx));
2052 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
2053 CI->replaceAllUsesWith(NewI);
2054 CI->eraseFromParent();
2058 PHINode *Phi = nullptr;
2059 Value *PrevPhi = UndefVal;
2061 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
2063 // Fill the "else" block, created in the previous iteration
2065 // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ]
2066 // %mask_1 = extractelement <16 x i1> %mask, i32 Idx
2067 // %to_load = icmp eq i1 %mask_1, true
2068 // br i1 %to_load, label %cond.load, label %else
2071 Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
2072 Phi->addIncoming(VResult, CondBlock);
2073 Phi->addIncoming(PrevPhi, PrevIfBlock);
2078 Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
2079 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
2080 ConstantInt::get(Predicate->getType(), 1));
2082 // Create "cond" block
2084 // %EltAddr = getelementptr i32* %1, i32 0
2085 // %Elt = load i32* %EltAddr
2086 // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
2088 CondBlock = IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.load");
2089 Builder.SetInsertPoint(InsertPt);
2092 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
2093 LoadInst *Load = Builder.CreateAlignedLoad(Gep, AlignVal);
2094 VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx));
2096 // Create "else" block, fill it in the next iteration
2097 BasicBlock *NewIfBlock =
2098 CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
2099 Builder.SetInsertPoint(InsertPt);
2100 Instruction *OldBr = IfBlock->getTerminator();
2101 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
2102 OldBr->eraseFromParent();
2103 PrevIfBlock = IfBlock;
2104 IfBlock = NewIfBlock;
2107 Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
2108 Phi->addIncoming(VResult, CondBlock);
2109 Phi->addIncoming(PrevPhi, PrevIfBlock);
2110 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
2111 CI->replaceAllUsesWith(NewI);
2112 CI->eraseFromParent();
2115 // Translate a masked store intrinsic, like
2116 // void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align,
// to a chain of basic blocks, storing the elements one by one if
2119 // the appropriate mask bit is set
2121 // %1 = bitcast i8* %addr to i32*
2122 // %2 = extractelement <16 x i1> %mask, i32 0
2123 // %3 = icmp eq i1 %2, true
2124 // br i1 %3, label %cond.store, label %else
2126 // cond.store: ; preds = %0
2127 // %4 = extractelement <16 x i32> %val, i32 0
2128 // %5 = getelementptr i32* %1, i32 0
2129 // store i32 %4, i32* %5
2132 // else: ; preds = %0, %cond.store
2133 // %6 = extractelement <16 x i1> %mask, i32 1
2134 // %7 = icmp eq i1 %6, true
2135 // br i1 %7, label %cond.store1, label %else2
2137 // cond.store1: ; preds = %else
2138 // %8 = extractelement <16 x i32> %val, i32 1
2139 // %9 = getelementptr i32* %1, i32 1
2140 // store i32 %8, i32* %9
2143 static void ScalarizeMaskedStore(CallInst *CI) {
2144 Value *Src = CI->getArgOperand(0);
2145 Value *Ptr = CI->getArgOperand(1);
2146 Value *Alignment = CI->getArgOperand(2);
2147 Value *Mask = CI->getArgOperand(3);
2149 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
2150 VectorType *VecType = dyn_cast<VectorType>(Src->getType());
2151 assert(VecType && "Unexpected data type in masked store intrinsic");
2153 Type *EltTy = VecType->getElementType();
2155 IRBuilder<> Builder(CI->getContext());
2156 Instruction *InsertPt = CI;
2157 BasicBlock *IfBlock = CI->getParent();
2158 Builder.SetInsertPoint(InsertPt);
2159 Builder.SetCurrentDebugLocation(CI->getDebugLoc());
2161 // Short-cut if the mask is all-true.
2162 bool IsAllOnesMask = isa<Constant>(Mask) &&
2163 cast<Constant>(Mask)->isAllOnesValue();
2165 if (IsAllOnesMask) {
2166 Builder.CreateAlignedStore(Src, Ptr, AlignVal);
2167 CI->eraseFromParent();
// Adjust alignment for the scalar instruction: each element access may only
// assume the minimum of the original alignment and the element size.
AlignVal = std::min(AlignVal, VecType->getScalarSizeInBits()/8);
// Bitcast %addr from i8* to EltTy*
2175 EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace());
2176 Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType);
2177 unsigned VectorWidth = VecType->getNumElements();
2179 if (isa<ConstantVector>(Mask)) {
2180 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
2181 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
2183 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
2185 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
2186 Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
2188 CI->eraseFromParent();
2192 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
2194 // Fill the "else" block, created in the previous iteration
2196 // %mask_1 = extractelement <16 x i1> %mask, i32 Idx
2197 // %to_store = icmp eq i1 %mask_1, true
2198 // br i1 %to_store, label %cond.store, label %else
2200 Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx));
2201 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
2202 ConstantInt::get(Predicate->getType(), 1));
2204 // Create "cond" block
2206 // %OneElt = extractelement <16 x i32> %Src, i32 Idx
2207 // %EltAddr = getelementptr i32* %1, i32 0
// store i32 %OneElt, i32* %EltAddr
2210 BasicBlock *CondBlock =
2211 IfBlock->splitBasicBlock(InsertPt->getIterator(), "cond.store");
2212 Builder.SetInsertPoint(InsertPt);
2214 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx));
2216 Builder.CreateInBoundsGEP(EltTy, FirstEltPtr, Builder.getInt32(Idx));
2217 Builder.CreateAlignedStore(OneElt, Gep, AlignVal);
2219 // Create "else" block, fill it in the next iteration
2220 BasicBlock *NewIfBlock =
2221 CondBlock->splitBasicBlock(InsertPt->getIterator(), "else");
2222 Builder.SetInsertPoint(InsertPt);
2223 Instruction *OldBr = IfBlock->getTerminator();
2224 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
2225 OldBr->eraseFromParent();
2226 IfBlock = NewIfBlock;
2228 CI->eraseFromParent();
2231 // Translate a masked gather intrinsic like
2232 // <16 x i32 > @llvm.masked.gather.v16i32( <16 x i32*> %Ptrs, i32 4,
2233 // <16 x i1> %Mask, <16 x i32> %Src)
// to a chain of basic blocks, loading the elements one by one if
2235 // the appropriate mask bit is set
// %Ptrs = getelementptr i32, i32* %base, <16 x i64> %ind
// %Mask0 = extractelement <16 x i1> %Mask, i32 0
// %ToLoad0 = icmp eq i1 %Mask0, true
// br i1 %ToLoad0, label %cond.load, label %else
//
// cond.load:
// %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
// %Load0 = load i32, i32* %Ptr0, align 4
// %Res0 = insertelement <16 x i32> undef, i32 %Load0, i32 0
//
// else:
// %res.phi.else = phi <16 x i32> [ %Res0, %cond.load ], [ undef, %0 ]
// %Mask1 = extractelement <16 x i1> %Mask, i32 1
// %ToLoad1 = icmp eq i1 %Mask1, true
// br i1 %ToLoad1, label %cond.load1, label %else2
//
// cond.load1:
// %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
// %Load1 = load i32, i32* %Ptr1, align 4
// %Res1 = insertelement <16 x i32> %res.phi.else, i32 %Load1, i32 1
//
// %Result = select <16 x i1> %Mask, <16 x i32> %res.phi.select, <16 x i32> %Src
2261 // ret <16 x i32> %Result
2262 static void ScalarizeMaskedGather(CallInst *CI) {
2263 Value *Ptrs = CI->getArgOperand(0);
2264 Value *Alignment = CI->getArgOperand(1);
2265 Value *Mask = CI->getArgOperand(2);
2266 Value *Src0 = CI->getArgOperand(3);
2268 VectorType *VecType = dyn_cast<VectorType>(CI->getType());
assert(VecType && "Unexpected return type of masked gather intrinsic");
2272 IRBuilder<> Builder(CI->getContext());
2273 Instruction *InsertPt = CI;
2274 BasicBlock *IfBlock = CI->getParent();
2275 BasicBlock *CondBlock = nullptr;
2276 BasicBlock *PrevIfBlock = CI->getParent();
2277 Builder.SetInsertPoint(InsertPt);
2278 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
2280 Builder.SetCurrentDebugLocation(CI->getDebugLoc());
2282 Value *UndefVal = UndefValue::get(VecType);
2284 // The result vector
2285 Value *VResult = UndefVal;
2286 unsigned VectorWidth = VecType->getNumElements();
// Take a shortcut if the mask is a vector of constants.
2289 bool IsConstMask = isa<ConstantVector>(Mask);
2292 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
2293 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
2295 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
2296 "Ptr" + Twine(Idx));
2297 LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal,
2298 "Load" + Twine(Idx));
2299 VResult = Builder.CreateInsertElement(VResult, Load,
2300 Builder.getInt32(Idx),
2301 "Res" + Twine(Idx));
2303 Value *NewI = Builder.CreateSelect(Mask, VResult, Src0);
2304 CI->replaceAllUsesWith(NewI);
2305 CI->eraseFromParent();
2309 PHINode *Phi = nullptr;
2310 Value *PrevPhi = UndefVal;
2312 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
2314 // Fill the "else" block, created in the previous iteration
2316 // %Mask1 = extractelement <16 x i1> %Mask, i32 1
2317 // %ToLoad1 = icmp eq i1 %Mask1, true
2318 // br i1 %ToLoad1, label %cond.load, label %else
2321 Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
2322 Phi->addIncoming(VResult, CondBlock);
2323 Phi->addIncoming(PrevPhi, PrevIfBlock);
2328 Value *Predicate = Builder.CreateExtractElement(Mask,
2329 Builder.getInt32(Idx),
2330 "Mask" + Twine(Idx));
2331 Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
2332 ConstantInt::get(Predicate->getType(), 1),
2333 "ToLoad" + Twine(Idx));
2335 // Create "cond" block
2337 // %EltAddr = getelementptr i32* %1, i32 0
2338 // %Elt = load i32* %EltAddr
2339 // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx
2341 CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load");
2342 Builder.SetInsertPoint(InsertPt);
2344 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
2345 "Ptr" + Twine(Idx));
2346 LoadInst *Load = Builder.CreateAlignedLoad(Ptr, AlignVal,
2347 "Load" + Twine(Idx));
2348 VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx),
2349 "Res" + Twine(Idx));
2351 // Create "else" block, fill it in the next iteration
2352 BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
2353 Builder.SetInsertPoint(InsertPt);
2354 Instruction *OldBr = IfBlock->getTerminator();
2355 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
2356 OldBr->eraseFromParent();
2357 PrevIfBlock = IfBlock;
2358 IfBlock = NewIfBlock;
2361 Phi = Builder.CreatePHI(VecType, 2, "res.phi.select");
2362 Phi->addIncoming(VResult, CondBlock);
2363 Phi->addIncoming(PrevPhi, PrevIfBlock);
2364 Value *NewI = Builder.CreateSelect(Mask, Phi, Src0);
2365 CI->replaceAllUsesWith(NewI);
2366 CI->eraseFromParent();
2369 // Translate a masked scatter intrinsic, like
2370 // void @llvm.masked.scatter.v16i32(<16 x i32> %Src, <16 x i32*>* %Ptrs, i32 4,
// to a chain of basic blocks, storing the elements one by one if
2373 // the appropriate mask bit is set.
// %Ptrs = getelementptr i32, i32* %ptr, <16 x i64> %ind
// %Mask0 = extractelement <16 x i1> %Mask, i32 0
// %ToStore0 = icmp eq i1 %Mask0, true
// br i1 %ToStore0, label %cond.store, label %else
//
// cond.store:
// %Elt0 = extractelement <16 x i32> %Src, i32 0
// %Ptr0 = extractelement <16 x i32*> %Ptrs, i32 0
// store i32 %Elt0, i32* %Ptr0, align 4
//
// else:
// %Mask1 = extractelement <16 x i1> %Mask, i32 1
// %ToStore1 = icmp eq i1 %Mask1, true
// br i1 %ToStore1, label %cond.store1, label %else2
//
// cond.store1:
// %Elt1 = extractelement <16 x i32> %Src, i32 1
// %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
// store i32 %Elt1, i32* %Ptr1, align 4
2397 static void ScalarizeMaskedScatter(CallInst *CI) {
2398 Value *Src = CI->getArgOperand(0);
2399 Value *Ptrs = CI->getArgOperand(1);
2400 Value *Alignment = CI->getArgOperand(2);
2401 Value *Mask = CI->getArgOperand(3);
2403 assert(isa<VectorType>(Src->getType()) &&
2404 "Unexpected data type in masked scatter intrinsic");
2405 assert(isa<VectorType>(Ptrs->getType()) &&
2406 isa<PointerType>(Ptrs->getType()->getVectorElementType()) &&
2407 "Vector of pointers is expected in masked scatter intrinsic");
2409 IRBuilder<> Builder(CI->getContext());
2410 Instruction *InsertPt = CI;
2411 BasicBlock *IfBlock = CI->getParent();
2412 Builder.SetInsertPoint(InsertPt);
2413 Builder.SetCurrentDebugLocation(CI->getDebugLoc());
2415 unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
2416 unsigned VectorWidth = Src->getType()->getVectorNumElements();
// Take a shortcut if the mask is a vector of constants.
2419 bool IsConstMask = isa<ConstantVector>(Mask);
2422 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
2423 if (cast<ConstantVector>(Mask)->getOperand(Idx)->isNullValue())
2425 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
2426 "Elt" + Twine(Idx));
2427 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
2428 "Ptr" + Twine(Idx));
2429 Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
2431 CI->eraseFromParent();
2434 for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) {
2435 // Fill the "else" block, created in the previous iteration
// %Mask1 = extractelement <16 x i1> %Mask, i32 Idx
// %ToStore = icmp eq i1 %Mask1, true
// br i1 %ToStore, label %cond.store, label %else
2441 Value *Predicate = Builder.CreateExtractElement(Mask,
2442 Builder.getInt32(Idx),
2443 "Mask" + Twine(Idx));
2445 Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate,
2446 ConstantInt::get(Predicate->getType(), 1),
2447 "ToStore" + Twine(Idx));
2449 // Create "cond" block
// %Elt1 = extractelement <16 x i32> %Src, i32 1
// %Ptr1 = extractelement <16 x i32*> %Ptrs, i32 1
// store i32 %Elt1, i32* %Ptr1
2455 BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store");
2456 Builder.SetInsertPoint(InsertPt);
2458 Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx),
2459 "Elt" + Twine(Idx));
2460 Value *Ptr = Builder.CreateExtractElement(Ptrs, Builder.getInt32(Idx),
2461 "Ptr" + Twine(Idx));
2462 Builder.CreateAlignedStore(OneElt, Ptr, AlignVal);
2464 // Create "else" block, fill it in the next iteration
2465 BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else");
2466 Builder.SetInsertPoint(InsertPt);
2467 Instruction *OldBr = IfBlock->getTerminator();
2468 BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr);
2469 OldBr->eraseFromParent();
2470 IfBlock = NewIfBlock;
2472 CI->eraseFromParent();
2475 /// If counting leading or trailing zeros is an expensive operation and a zero
2476 /// input is defined, add a check for zero to avoid calling the intrinsic.
/// We want to transform:
///   %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
/// into:
/// entry:
///   %cmpz = icmp eq i64 %A, 0
///   br i1 %cmpz, label %cond.end, label %cond.false
/// cond.false:
///   %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
///   br label %cond.end
/// cond.end:
///   %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2491 /// If the transform is performed, return true and set ModifiedDT to true.
2492 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2493 const TargetLowering *TLI,
2494 const DataLayout *DL,
2499 // If a zero input is undefined, it doesn't make sense to despeculate that.
2500 if (match(CountZeros->getOperand(1), m_One()))
2503 // If it's cheap to speculate, there's nothing to do.
2504 auto IntrinsicID = CountZeros->getIntrinsicID();
2505 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
2506 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
2509 // Only handle legal scalar cases. Anything else requires too much work.
2510 Type *Ty = CountZeros->getType();
2511 unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
2512 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSize())
2515 // The intrinsic will be sunk behind a compare against zero and branch.
2516 BasicBlock *StartBlock = CountZeros->getParent();
2517 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2519 // Create another block after the count zero intrinsic. A PHI will be added
2520 // in this block to select the result of the intrinsic or the bit-width
2521 // constant if the input to the intrinsic is zero.
2522 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
2523 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2525 // Set up a builder to create a compare, conditional branch, and PHI.
2526 IRBuilder<> Builder(CountZeros->getContext());
2527 Builder.SetInsertPoint(StartBlock->getTerminator());
2528 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2530 // Replace the unconditional branch that was created by the first split with
2531 // a compare against zero and a conditional branch.
2532 Value *Zero = Constant::getNullValue(Ty);
2533 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
2534 Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2535 StartBlock->getTerminator()->eraseFromParent();
2537 // Create a PHI in the end block to select either the output of the intrinsic
2538 // or the bit width of the operand.
2539 Builder.SetInsertPoint(&EndBlock->front());
2540 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2541 CountZeros->replaceAllUsesWith(PN);
2542 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2543 PN->addIncoming(BitWidth, StartBlock);
2544 PN->addIncoming(CountZeros, CallBlock);
2546 // We are explicitly handling the zero case, so we can set the intrinsic's
2547 // undefined zero argument to 'true'. This will also prevent reprocessing the
2548 // intrinsic; we only despeculate when a zero input is defined.
2549 CountZeros->setArgOperand(1, Builder.getTrue());
2554 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
2555 BasicBlock *BB = CI->getParent();
2557 // Lower inline assembly if we can.
// If we found an inline asm expression, and if the target knows how to
// lower it to normal LLVM code, do so now.
2560 if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
2561 if (TLI->ExpandInlineAsm(CI)) {
2562 // Avoid invalidating the iterator.
2563 CurInstIterator = BB->begin();
2564 // Avoid processing instructions out of order, which could cause
2565 // reuse before a value is defined.
2569 // Sink address computing for memory operands into the block.
2570 if (optimizeInlineAsmInst(CI))
// Align the pointer arguments to this call if the target thinks it's a good idea.
2576 unsigned MinSize, PrefAlign;
2577 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2578 for (auto &Arg : CI->arg_operands()) {
2579 // We want to align both objects whose address is used directly and
2580 // objects whose address is used in casts and GEPs, though it only makes
2581 // sense for GEPs if the offset is a multiple of the desired alignment and
2582 // if size - offset meets the size threshold.
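// For example (hypothetical numbers, not from the original comment): with
// PrefAlign = 16, an argument of the form gep(%buf, 32) still allows %buf
// to be over-aligned, since 32 is a multiple of 16; a gep with offset 8
// would be skipped by the check below.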
2583 if (!Arg->getType()->isPointerTy())
2585 APInt Offset(DL->getPointerSizeInBits(
2586 cast<PointerType>(Arg->getType())->getAddressSpace()),
2588 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2589 uint64_t Offset2 = Offset.getLimitedValue();
2590 if ((Offset2 & (PrefAlign-1)) != 0)
2593 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
2594 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2595 AI->setAlignment(PrefAlign);
2596 // Global variables can only be aligned if they are defined in this
2597 // object (i.e. they are uniquely initialized in this object), and
// over-aligning global variables that have an explicit section is forbidden.
2601 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2602 GV->getAlignment() < PrefAlign &&
2603 DL->getTypeAllocSize(GV->getType()->getElementType()) >=
2605 GV->setAlignment(PrefAlign);
// If this is a memcpy (or similar) then we may be able to improve the alignment.
2609 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2610 unsigned Align = getKnownAlignment(MI->getDest(), *DL);
2611 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
2612 Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
2613 if (Align > MI->getAlignment())
2614 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
2618 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2620 switch (II->getIntrinsicID()) {
2622 case Intrinsic::objectsize: {
2623 // Lower all uses of llvm.objectsize.*
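// Illustrative semantics (not from the original comment): with the "min"
// flag set (second argument true), an unknown @llvm.objectsize result
// folds to 0; with it clear, it folds to -1 (all ones).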
2624 bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
2625 Type *ReturnTy = CI->getType();
2626 Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
2628 // Substituting this can cause recursive simplifications, which can
// invalidate our iterator. Use a WeakVH to hold onto it in case this happens.
2631 WeakVH IterHandle(&*CurInstIterator);
2633 replaceAndRecursivelySimplify(CI, RetVal,
2636 // If the iterator instruction was recursively deleted, start over at the
2637 // start of the block.
2638 if (IterHandle != CurInstIterator.getNodePtrUnchecked()) {
2639 CurInstIterator = BB->begin();
2644 case Intrinsic::masked_load: {
2645 // Scalarize unsupported vector masked load
2646 if (!TTI->isLegalMaskedLoad(CI->getType())) {
2647 ScalarizeMaskedLoad(CI);
2653 case Intrinsic::masked_store: {
2654 if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType())) {
2655 ScalarizeMaskedStore(CI);
2661 case Intrinsic::masked_gather: {
2662 if (!TTI->isLegalMaskedGather(CI->getType())) {
2663 ScalarizeMaskedGather(CI);
2669 case Intrinsic::masked_scatter: {
2670 if (!TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType())) {
2671 ScalarizeMaskedScatter(CI);
2677 case Intrinsic::aarch64_stlxr:
2678 case Intrinsic::aarch64_stxr: {
2679 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2680 if (!ExtVal || !ExtVal->hasOneUse() ||
2681 ExtVal->getParent() == CI->getParent())
2683 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2684 ExtVal->moveBefore(CI);
2685 // Mark this instruction as "inserted by CGP", so that other
2686 // optimizations don't touch it.
2687 InsertedInsts.insert(ExtVal);
2690 case Intrinsic::invariant_group_barrier:
2691 II->replaceAllUsesWith(II->getArgOperand(0));
2692 II->eraseFromParent();
2695 case Intrinsic::cttz:
2696 case Intrinsic::ctlz:
2697 // If counting zeros is expensive, try to avoid it.
2698 return despeculateCountZeros(II, TLI, DL, ModifiedDT);
2702 // Unknown address space.
// TODO: Target hook to pick which address space the intrinsic cares about.
2705 unsigned AddrSpace = ~0u;
2706 SmallVector<Value*, 2> PtrOps;
2708 if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy, AddrSpace))
2709 while (!PtrOps.empty())
2710 if (optimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy, AddrSpace))
2715 // From here on out we're working with named functions.
2716 if (!CI->getCalledFunction()) return false;
2718 // Lower all default uses of _chk calls. This is very similar
2719 // to what InstCombineCalls does, but here we are only lowering calls
2720 // to fortified library functions (e.g. __memcpy_chk) that have the default
2721 // "don't know" as the objectsize. Anything else should be left alone.
2722 FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2723 if (Value *V = Simplifier.optimizeCall(CI)) {
2724 CI->replaceAllUsesWith(V);
2725 CI->eraseFromParent();
2731 /// Look for opportunities to duplicate return instructions to the predecessor
2732 /// to enable tail call optimizations. The case it is currently looking for is:
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
///
/// =>
///
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
2761 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) {
2765 ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
2769 PHINode *PN = nullptr;
2770 BitCastInst *BCI = nullptr;
2771 Value *V = RI->getReturnValue();
2773 BCI = dyn_cast<BitCastInst>(V);
2775 V = BCI->getOperand(0);
2777 PN = dyn_cast<PHINode>(V);
2782 if (PN && PN->getParent() != BB)
2785 // It's not safe to eliminate the sign / zero extension of the return value.
2786 // See llvm::isInTailCallPosition().
2787 const Function *F = BB->getParent();
2788 AttributeSet CallerAttrs = F->getAttributes();
2789 if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
2790 CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
2793 // Make sure there are no instructions between the PHI and return, or that the
2794 // return is the first instruction in the block.
2796 BasicBlock::iterator BI = BB->begin();
2797 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
2799 // Also skip over the bitcast.
2804 BasicBlock::iterator BI = BB->begin();
2805 while (isa<DbgInfoIntrinsic>(BI)) ++BI;
/// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail call.
2812 SmallVector<CallInst*, 4> TailCalls;
2814 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2815 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
2816 // Make sure the phi value is indeed produced by the tail call.
2817 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
2818 TLI->mayBeEmittedAsTailCall(CI))
2819 TailCalls.push_back(CI);
2822 SmallPtrSet<BasicBlock*, 4> VisitedBBs;
2823 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
2824 if (!VisitedBBs.insert(*PI).second)
2827 BasicBlock::InstListType &InstList = (*PI)->getInstList();
2828 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
2829 BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
2830 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
2834 CallInst *CI = dyn_cast<CallInst>(&*RI);
2835 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
2836 TailCalls.push_back(CI);
2840 bool Changed = false;
2841 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
2842 CallInst *CI = TailCalls[i];
2845 // Conservatively require the attributes of the call to match those of the
2846 // return. Ignore noalias because it doesn't affect the call sequence.
2847 AttributeSet CalleeAttrs = CS.getAttributes();
2848 if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
2849 removeAttribute(Attribute::NoAlias) !=
2850 AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
2851 removeAttribute(Attribute::NoAlias))
2854 // Make sure the call instruction is followed by an unconditional branch to
2855 // the return block.
2856 BasicBlock *CallBB = CI->getParent();
2857 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
2858 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2861 // Duplicate the return into CallBB.
2862 (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
2863 ModifiedDT = Changed = true;
2867 // If we eliminated all predecessors of the block, delete the block now.
2868 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
2869 BB->eraseFromParent();
2874 //===----------------------------------------------------------------------===//
2875 // Memory Optimization
2876 //===----------------------------------------------------------------------===//
2880 /// This is an extended version of TargetLowering::AddrMode
2881 /// which holds actual Value*'s for register values.
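/// For illustration (not part of the original comment), the matched form is
/// roughly BaseGV + BaseOffs + BaseReg + Scale*ScaledReg; e.g. an access to
/// @arr[%i] with 4-byte elements could be described by BaseGV = @arr,
/// BaseOffs = 0, Scale = 4 and ScaledReg = %i.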
2882 struct ExtAddrMode : public TargetLowering::AddrMode {
2885 ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
2886 void print(raw_ostream &OS) const;
2889 bool operator==(const ExtAddrMode& O) const {
2890 return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
2891 (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
2892 (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
2897 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2903 void ExtAddrMode::print(raw_ostream &OS) const {
2904 bool NeedPlus = false;
2907 OS << (NeedPlus ? " + " : "")
2909 BaseGV->printAsOperand(OS, /*PrintType=*/false);
2914 OS << (NeedPlus ? " + " : "")
2920 OS << (NeedPlus ? " + " : "")
2922 BaseReg->printAsOperand(OS, /*PrintType=*/false);
2926 OS << (NeedPlus ? " + " : "")
2928 ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2934 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2935 void ExtAddrMode::dump() const {
2941 /// \brief This class provides transaction based operation on the IR.
2942 /// Every change made through this class is recorded in the internal state and
2943 /// can be undone (rollback) until commit is called.
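/// A minimal usage sketch (illustrative only, not from the original comment;
/// Ext, PromotedTy and Profitable are placeholder names):
///   TypePromotionTransaction TPT;
///   auto RestorePt = TPT.getRestorationPoint();
///   TPT.mutateType(Ext, PromotedTy);   // speculative change, recorded
///   if (Profitable)
///     TPT.commit();                    // keep the changes
///   else
///     TPT.rollback(RestorePt);         // undo everything since RestorePt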
2944 class TypePromotionTransaction {
2946 /// \brief This represents the common interface of the individual transaction.
2947 /// Each class implements the logic for doing one specific modification on
2948 /// the IR via the TypePromotionTransaction.
2949 class TypePromotionAction {
2951 /// The Instruction modified.
2955 /// \brief Constructor of the action.
2956 /// The constructor performs the related action on the IR.
2957 TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2959 virtual ~TypePromotionAction() {}
2961 /// \brief Undo the modification done by this action.
2962 /// When this method is called, the IR must be in the same state as it was
2963 /// before this action was applied.
2964 /// \pre Undoing the action works if and only if the IR is in the exact same
2965 /// state as it was directly after this action was applied.
2966 virtual void undo() = 0;
2968 /// \brief Advocate every change made by this action.
2969 /// When the results on the IR of the action are to be kept, it is important
2970 /// to call this function, otherwise hidden information may be kept forever.
2971 virtual void commit() {
2972 // Nothing to be done, this action is not doing anything.
2976 /// \brief Utility to remember the position of an instruction.
2977 class InsertionHandler {
2978 /// Position of an instruction.
2979 /// Either an instruction:
2980 /// - Is the first in a basic block: BB is used.
/// - Has a previous instruction: PrevInst is used.
2983 Instruction *PrevInst;
2986 /// Remember whether or not the instruction had a previous instruction.
2987 bool HasPrevInstruction;
2990 /// \brief Record the position of \p Inst.
2991 InsertionHandler(Instruction *Inst) {
2992 BasicBlock::iterator It = Inst->getIterator();
2993 HasPrevInstruction = (It != (Inst->getParent()->begin()));
2994 if (HasPrevInstruction)
2995 Point.PrevInst = &*--It;
2997 Point.BB = Inst->getParent();
3000 /// \brief Insert \p Inst at the recorded position.
3001 void insert(Instruction *Inst) {
3002 if (HasPrevInstruction) {
3003 if (Inst->getParent())
3004 Inst->removeFromParent();
3005 Inst->insertAfter(Point.PrevInst);
3007 Instruction *Position = &*Point.BB->getFirstInsertionPt();
3008 if (Inst->getParent())
3009 Inst->moveBefore(Position);
3011 Inst->insertBefore(Position);
3016 /// \brief Move an instruction before another.
3017 class InstructionMoveBefore : public TypePromotionAction {
3018 /// Original position of the instruction.
3019 InsertionHandler Position;
3022 /// \brief Move \p Inst before \p Before.
3023 InstructionMoveBefore(Instruction *Inst, Instruction *Before)
3024 : TypePromotionAction(Inst), Position(Inst) {
3025 DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
3026 Inst->moveBefore(Before);
3029 /// \brief Move the instruction back to its original position.
3030 void undo() override {
3031 DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
3032 Position.insert(Inst);
3036 /// \brief Set the operand of an instruction with a new value.
3037 class OperandSetter : public TypePromotionAction {
3038 /// Original operand of the instruction.
3040 /// Index of the modified instruction.
3044 /// \brief Set \p Idx operand of \p Inst with \p NewVal.
3045 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
3046 : TypePromotionAction(Inst), Idx(Idx) {
3047 DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
3048 << "for:" << *Inst << "\n"
3049 << "with:" << *NewVal << "\n");
3050 Origin = Inst->getOperand(Idx);
3051 Inst->setOperand(Idx, NewVal);
3054 /// \brief Restore the original value of the instruction.
3055 void undo() override {
3056 DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
3057 << "for: " << *Inst << "\n"
3058 << "with: " << *Origin << "\n");
3059 Inst->setOperand(Idx, Origin);
3063 /// \brief Hide the operands of an instruction.
/// Behave as if this instruction were not using any of its operands.
3065 class OperandsHider : public TypePromotionAction {
3066 /// The list of original operands.
3067 SmallVector<Value *, 4> OriginalValues;
3070 /// \brief Remove \p Inst from the uses of the operands of \p Inst.
3071 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
3072 DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
3073 unsigned NumOpnds = Inst->getNumOperands();
3074 OriginalValues.reserve(NumOpnds);
3075 for (unsigned It = 0; It < NumOpnds; ++It) {
3076 // Save the current operand.
3077 Value *Val = Inst->getOperand(It);
3078 OriginalValues.push_back(Val);
3080 // We could use OperandSetter here, but that would imply an overhead
3081 // that we are not willing to pay.
3082 Inst->setOperand(It, UndefValue::get(Val->getType()));
3086 /// \brief Restore the original list of uses.
3087 void undo() override {
3088 DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
3089 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
3090 Inst->setOperand(It, OriginalValues[It]);
3094 /// \brief Build a truncate instruction.
3095 class TruncBuilder : public TypePromotionAction {
3098 /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
3100 /// trunc Opnd to Ty.
3101 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
3102 IRBuilder<> Builder(Opnd);
3103 Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
3104 DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
3107 /// \brief Get the built value.
3108 Value *getBuiltValue() { return Val; }
3110 /// \brief Remove the built instruction.
3111 void undo() override {
3112 DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
3113 if (Instruction *IVal = dyn_cast<Instruction>(Val))
3114 IVal->eraseFromParent();
3118 /// \brief Build a sign extension instruction.
3119 class SExtBuilder : public TypePromotionAction {
3122 /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
3124 /// sext Opnd to Ty.
3125 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3126 : TypePromotionAction(InsertPt) {
3127 IRBuilder<> Builder(InsertPt);
3128 Val = Builder.CreateSExt(Opnd, Ty, "promoted");
3129 DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
3132 /// \brief Get the built value.
3133 Value *getBuiltValue() { return Val; }
3135 /// \brief Remove the built instruction.
3136 void undo() override {
3137 DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
3138 if (Instruction *IVal = dyn_cast<Instruction>(Val))
3139 IVal->eraseFromParent();
3143 /// \brief Build a zero extension instruction.
3144 class ZExtBuilder : public TypePromotionAction {
3147 /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
3149 /// zext Opnd to Ty.
3150 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3151 : TypePromotionAction(InsertPt) {
3152 IRBuilder<> Builder(InsertPt);
3153 Val = Builder.CreateZExt(Opnd, Ty, "promoted");
3154 DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
3157 /// \brief Get the built value.
3158 Value *getBuiltValue() { return Val; }
3160 /// \brief Remove the built instruction.
3161 void undo() override {
3162 DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
3163 if (Instruction *IVal = dyn_cast<Instruction>(Val))
3164 IVal->eraseFromParent();
3168 /// \brief Mutate an instruction to another type.
3169 class TypeMutator : public TypePromotionAction {
3170 /// Record the original type.
3174 /// \brief Mutate the type of \p Inst into \p NewTy.
3175 TypeMutator(Instruction *Inst, Type *NewTy)
3176 : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
3177 DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
3179 Inst->mutateType(NewTy);
3182 /// \brief Mutate the instruction back to its original type.
3183 void undo() override {
3184 DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
3186 Inst->mutateType(OrigTy);
3190 /// \brief Replace the uses of an instruction by another instruction.
3191 class UsesReplacer : public TypePromotionAction {
3192 /// Helper structure to keep track of the replaced uses.
3193 struct InstructionAndIdx {
/// The instruction that uses the replaced instruction.
/// The operand index at which the replaced value is used by this instruction.
3198 InstructionAndIdx(Instruction *Inst, unsigned Idx)
3199 : Inst(Inst), Idx(Idx) {}
3202 /// Keep track of the original uses (pair Instruction, Index).
3203 SmallVector<InstructionAndIdx, 4> OriginalUses;
3204 typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;
3207 /// \brief Replace all the use of \p Inst by \p New.
3208 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
3209 DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3211 // Record the original uses.
3212 for (Use &U : Inst->uses()) {
3213 Instruction *UserI = cast<Instruction>(U.getUser());
3214 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3216 // Now, we can replace the uses.
3217 Inst->replaceAllUsesWith(New);
3220 /// \brief Reassign the original uses of Inst to Inst.
3221 void undo() override {
3222 DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3223 for (use_iterator UseIt = OriginalUses.begin(),
3224 EndIt = OriginalUses.end();
3225 UseIt != EndIt; ++UseIt) {
3226 UseIt->Inst->setOperand(UseIt->Idx, Inst);
3231 /// \brief Remove an instruction from the IR.
3232 class InstructionRemover : public TypePromotionAction {
3233 /// Original position of the instruction.
3234 InsertionHandler Inserter;
/// Helper structure to hide all the links to the instruction. In other
/// words, this helps to behave as if the instruction were removed.
3237 OperandsHider Hider;
3238 /// Keep track of the uses replaced, if any.
3239 UsesReplacer *Replacer;
/// \brief Remove all references to \p Inst and optionally replace all its uses with \p New.
3244 /// \pre If !Inst->use_empty(), then New != nullptr
3245 InstructionRemover(Instruction *Inst, Value *New = nullptr)
3246 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3249 Replacer = new UsesReplacer(Inst, New);
3250 DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3251 Inst->removeFromParent();
3254 ~InstructionRemover() override { delete Replacer; }
3256 /// \brief Really remove the instruction.
3257 void commit() override { delete Inst; }
/// \brief Resurrect the instruction and reassign it to the proper uses if
/// a new value was provided when building this action.
3261 void undo() override {
3262 DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3263 Inserter.insert(Inst);
3271 /// Restoration point.
3272 /// The restoration point is a pointer to an action instead of an iterator
3273 /// because the iterator may be invalidated but not the pointer.
3274 typedef const TypePromotionAction *ConstRestorationPt;
/// Advocate every change made in this transaction.
3277 /// Undo all the changes made after the given point.
3278 void rollback(ConstRestorationPt Point);
3279 /// Get the current restoration point.
3280 ConstRestorationPt getRestorationPoint() const;
3282 /// \name API for IR modification with state keeping to support rollback.
3284 /// Same as Instruction::setOperand.
3285 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3286 /// Same as Instruction::eraseFromParent.
3287 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3288 /// Same as Value::replaceAllUsesWith.
3289 void replaceAllUsesWith(Instruction *Inst, Value *New);
3290 /// Same as Value::mutateType.
3291 void mutateType(Instruction *Inst, Type *NewTy);
3292 /// Same as IRBuilder::createTrunc.
3293 Value *createTrunc(Instruction *Opnd, Type *Ty);
3294 /// Same as IRBuilder::createSExt.
3295 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3296 /// Same as IRBuilder::createZExt.
3297 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3298 /// Same as Instruction::moveBefore.
3299 void moveBefore(Instruction *Inst, Instruction *Before);
3303 /// The ordered list of actions made so far.
3304 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3305 typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt;
3308 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3311 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
3314 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3317 make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal));
3320 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3322 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3325 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3326 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3329 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
3331 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3332 Value *Val = Ptr->getBuiltValue();
3333 Actions.push_back(std::move(Ptr));
3337 Value *TypePromotionTransaction::createSExt(Instruction *Inst,
3338 Value *Opnd, Type *Ty) {
3339 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3340 Value *Val = Ptr->getBuiltValue();
3341 Actions.push_back(std::move(Ptr));
3345 Value *TypePromotionTransaction::createZExt(Instruction *Inst,
3346 Value *Opnd, Type *Ty) {
3347 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3348 Value *Val = Ptr->getBuiltValue();
3349 Actions.push_back(std::move(Ptr));
3353 void TypePromotionTransaction::moveBefore(Instruction *Inst,
3354 Instruction *Before) {
3356 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before));
3359 TypePromotionTransaction::ConstRestorationPt
3360 TypePromotionTransaction::getRestorationPoint() const {
3361 return !Actions.empty() ? Actions.back().get() : nullptr;
3364 void TypePromotionTransaction::commit() {
3365 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
3371 void TypePromotionTransaction::rollback(
3372 TypePromotionTransaction::ConstRestorationPt Point) {
3373 while (!Actions.empty() && Point != Actions.back().get()) {
3374 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3379 /// \brief A helper class for matching addressing modes.
3381 /// This encapsulates the logic for matching the target-legal addressing modes.
3382 class AddressingModeMatcher {
3383 SmallVectorImpl<Instruction*> &AddrModeInsts;
3384 const TargetMachine &TM;
3385 const TargetLowering &TLI;
3386 const DataLayout &DL;
3388 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3389 /// the memory instruction that we're computing this address for.
3392 Instruction *MemoryInst;
3394 /// This is the addressing mode that we're building up. This is
3395 /// part of the return value of this addressing mode matching stuff.
3396 ExtAddrMode &AddrMode;
3398 /// The instructions inserted by other CodeGenPrepare optimizations.
3399 const SetOfInstrs &InsertedInsts;
3400 /// A map from the instructions to their type before promotion.
3401 InstrToOrigTy &PromotedInsts;
3402 /// The ongoing transaction where every action should be registered.
3403 TypePromotionTransaction &TPT;
3405 /// This is set to true when we should not do profitability checks.
3406 /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3407 bool IgnoreProfitability;
3409 AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
3410 const TargetMachine &TM, Type *AT, unsigned AS,
3411 Instruction *MI, ExtAddrMode &AM,
3412 const SetOfInstrs &InsertedInsts,
3413 InstrToOrigTy &PromotedInsts,
3414 TypePromotionTransaction &TPT)
3415 : AddrModeInsts(AMI), TM(TM),
3416 TLI(*TM.getSubtargetImpl(*MI->getParent()->getParent())
3417 ->getTargetLowering()),
3418 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
3419 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
3420 PromotedInsts(PromotedInsts), TPT(TPT) {
3421 IgnoreProfitability = false;
3425 /// Find the maximal addressing mode that a load/store of V can fold,
3426 /// given an access type of AccessTy. This returns a list of involved
3427 /// instructions in AddrModeInsts.
3428 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare optimizations.
3430 /// \p PromotedInsts maps the instructions to their type before promotion.
3431 /// \p TPT The ongoing transaction where every action should be registered.
3432 static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
3433 Instruction *MemoryInst,
3434 SmallVectorImpl<Instruction*> &AddrModeInsts,
3435 const TargetMachine &TM,
3436 const SetOfInstrs &InsertedInsts,
3437 InstrToOrigTy &PromotedInsts,
3438 TypePromotionTransaction &TPT) {
3441 bool Success = AddressingModeMatcher(AddrModeInsts, TM, AccessTy, AS,
3442 MemoryInst, Result, InsertedInsts,
3443 PromotedInsts, TPT).matchAddr(V, 0);
3444 (void)Success; assert(Success && "Couldn't select *anything*?");
3448 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3449 bool matchAddr(Value *V, unsigned Depth);
3450 bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
3451 bool *MovedAway = nullptr);
3452 bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3453 ExtAddrMode &AMBefore,
3454 ExtAddrMode &AMAfter);
3455 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3456 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3457 Value *PromotedOperand) const;
3460 /// Try adding ScaleReg*Scale to the current addressing mode.
3461 /// Return true and update AddrMode if this addr mode is legal for the target, false if not.
3463 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
3465 // If Scale is 1, then this is the same as adding ScaleReg to the addressing
3466 // mode. Just process that directly.
3468 return matchAddr(ScaleReg, Depth);
3470 // If the scale is 0, it takes nothing to add this.
3474 // If we already have a scale of this value, we can add to it, otherwise, we
3475 // need an available scale field.
3476 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
3479 ExtAddrMode TestAddrMode = AddrMode;
3481 // Add scale to turn X*4+X*3 -> X*7. This could also do things like
3482 // [A+B + A*7] -> [B+A*8].
3483 TestAddrMode.Scale += Scale;
3484 TestAddrMode.ScaledReg = ScaleReg;
3486 // If the new address isn't legal, bail out.
3487 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3490 // It was legal, so commit it.
3491 AddrMode = TestAddrMode;
3493 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
3494 // to see if ScaleReg is actually X+C. If so, we can turn this into adding
3495 // X*Scale + C*Scale to addr mode.
3496 ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
3497 if (isa<Instruction>(ScaleReg) && // not a constant expr.
3498 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
3499 TestAddrMode.ScaledReg = AddLHS;
3500 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
3502 // If this addressing mode is legal, commit it and remember that we folded
3503 // this instruction.
3504 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
3505 AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
3506 AddrMode = TestAddrMode;
3511 // Otherwise, not (x+c)*scale, just return what we have.
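// A worked example of the fold above, on plain integers. MiniScaledMode is an
// illustrative stand-in for ExtAddrMode and there is no legality query here;
// it only shows the arithmetic: adding another Reg*Scale term, and rewriting
// (X + C)*Scale as X*Scale with the constant folded into the displacement.
#include <cassert>
#include <cstdint>

struct MiniScaledMode {
  int64_t Scale = 0;    // multiplier applied to the scaled register
  int64_t BaseOffs = 0; // constant displacement
};

// Fold another Reg*Scale term; if Reg is known to be (X + C), scale through
// the add so that only X stays in the register: BaseOffs += C * Scale.
inline void foldScaledTerm(MiniScaledMode &AM, int64_t Scale, bool RegIsXPlusC,
                           int64_t C) {
  AM.Scale += Scale; // e.g. X*4 + X*3 becomes X*7
  if (RegIsXPlusC)
    AM.BaseOffs += C * AM.Scale;
}

inline void scaledTermExample() {
  MiniScaledMode AM;
  AM.Scale = 4;                                         // already matched (X+5)*4
  foldScaledTerm(AM, 3, /*RegIsXPlusC=*/true, /*C=*/5); // add (X+5)*3
  assert(AM.Scale == 7 && AM.BaseOffs == 35);           // (X+5)*7 == X*7 + 35
}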
3515 /// This is a little filter, which returns true if an addressing computation
3516 /// involving I might be folded into a load/store accessing it.
3517 /// This doesn't need to be perfect, but needs to accept at least
3518 /// the set of instructions that matchOperationAddr can.
3519 static bool MightBeFoldableInst(Instruction *I) {
3520 switch (I->getOpcode()) {
3521 case Instruction::BitCast:
3522 case Instruction::AddrSpaceCast:
3523 // Don't touch identity bitcasts.
3524 if (I->getType() == I->getOperand(0)->getType())
3526 return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
3527 case Instruction::PtrToInt:
3528 // PtrToInt is always a noop, as we know that the int type is pointer sized.
3530 case Instruction::IntToPtr:
3531 // We know the input is intptr_t, so this is foldable.
3533 case Instruction::Add:
3535 case Instruction::Mul:
3536 case Instruction::Shl:
3537 // Can only handle X*C and X << C.
3538 return isa<ConstantInt>(I->getOperand(1));
3539 case Instruction::GetElementPtr:
3546 /// \brief Check whether or not \p Val is a legal instruction for \p TLI.
3547 /// \note \p Val is assumed to be the product of some type promotion.
3548 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
3549 /// to be legal, as the non-promoted value would have had the same state.
3550 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
3551 const DataLayout &DL, Value *Val) {
3552 Instruction *PromotedInst = dyn_cast<Instruction>(Val);
3555 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
3556 // If the ISDOpcode is undefined, it was undefined before the promotion.
3559 // Otherwise, check if the promoted instruction is legal or not.
3560 return TLI.isOperationLegalOrCustom(
3561 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
3564 /// \brief Helper class to perform type promotion.
3565 class TypePromotionHelper {
3566 /// \brief Utility function to check whether or not a sign or zero extension
3567 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
3568 /// either using the operands of \p Inst or promoting \p Inst.
3569 /// The type of the extension is defined by \p IsSExt.
3570 /// In other words, check if:
3571 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
3572 /// #1 Promotion applies:
3573 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
3574 /// #2 Operand reuses:
3575 /// ext opnd1 to ConsideredExtType.
3576 /// \p PromotedInsts maps the instructions to their type before promotion.
3577 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
3578 const InstrToOrigTy &PromotedInsts, bool IsSExt);
3580 /// \brief Utility function to determine if \p OpIdx should be promoted when
3581 /// promoting \p Inst.
3582 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
3583 return !(isa<SelectInst>(Inst) && OpIdx == 0);
3586 /// \brief Utility function to promote the operand of \p Ext when this
3587 /// operand is a promotable trunc or sext or zext.
3588 /// \p PromotedInsts maps the instructions to their type before promotion.
3589 /// \p CreatedInstsCost[out] contains the cost of all instructions
3590 /// created to promote the operand of Ext.
3591 /// Newly added extensions are inserted in \p Exts.
3592 /// Newly added truncates are inserted in \p Truncs.
3593 /// Should never be called directly.
3594 /// \return The promoted value which is used instead of Ext.
3595 static Value *promoteOperandForTruncAndAnyExt(
3596 Instruction *Ext, TypePromotionTransaction &TPT,
3597 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3598 SmallVectorImpl<Instruction *> *Exts,
3599 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
3601 /// \brief Utility function to promote the operand of \p Ext when this
3602 /// operand is promotable and is not a supported trunc or sext.
3603 /// \p PromotedInsts maps the instructions to their type before promotion.
3604 /// \p CreatedInstsCost[out] contains the cost of all the instructions
3605 /// created to promote the operand of Ext.
3606 /// Newly added extensions are inserted in \p Exts.
3607 /// Newly added truncates are inserted in \p Truncs.
3608 /// Should never be called directly.
3609 /// \return The promoted value which is used instead of Ext.
3610 static Value *promoteOperandForOther(Instruction *Ext,
3611 TypePromotionTransaction &TPT,
3612 InstrToOrigTy &PromotedInsts,
3613 unsigned &CreatedInstsCost,
3614 SmallVectorImpl<Instruction *> *Exts,
3615 SmallVectorImpl<Instruction *> *Truncs,
3616 const TargetLowering &TLI, bool IsSExt);
3618 /// \see promoteOperandForOther.
3619 static Value *signExtendOperandForOther(
3620 Instruction *Ext, TypePromotionTransaction &TPT,
3621 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3622 SmallVectorImpl<Instruction *> *Exts,
3623 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3624 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
3625 Exts, Truncs, TLI, true);
3628 /// \see promoteOperandForOther.
3629 static Value *zeroExtendOperandForOther(
3630 Instruction *Ext, TypePromotionTransaction &TPT,
3631 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3632 SmallVectorImpl<Instruction *> *Exts,
3633 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3634 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
3635 Exts, Truncs, TLI, false);
3639 /// Type for the utility function that promotes the operand of Ext.
3640 typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
3641 InstrToOrigTy &PromotedInsts,
3642 unsigned &CreatedInstsCost,
3643 SmallVectorImpl<Instruction *> *Exts,
3644 SmallVectorImpl<Instruction *> *Truncs,
3645 const TargetLowering &TLI);
3646 /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate
3647 /// action to promote the operand of \p Ext instead of using Ext.
3648 /// \return NULL if no promotable action is possible with the current extension.
3650 /// \p InsertedInsts keeps track of all the instructions inserted by the
3651 /// other CodeGenPrepare optimizations. This information is important
3652 /// because we do not want to promote these instructions as CodeGenPrepare
3653 /// will reinsert them later, creating an infinite create/remove loop.
3654 /// \p PromotedInsts maps the instructions to their type before promotion.
3655 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
3656 const TargetLowering &TLI,
3657 const InstrToOrigTy &PromotedInsts);
3660 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
3661 Type *ConsideredExtType,
3662 const InstrToOrigTy &PromotedInsts,
3664 // The promotion helper does not know how to deal with vector types yet.
3665 // To be able to fix that, we would need to fix the places where we
3666 // statically extend, e.g., constants and such.
3667 if (Inst->getType()->isVectorTy())
3670 // We can always get through zext.
3671 if (isa<ZExtInst>(Inst))
3674 // sext(sext) is ok too.
3675 if (IsSExt && isa<SExtInst>(Inst))
3678 // We can get through binary operator, if it is legal. In other words, the
3679 // binary operator must have a nuw or nsw flag.
3680 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
3681 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
3682 ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
3683 (IsSExt && BinOp->hasNoSignedWrap())))
3686 // Check if we can do the following simplification.
3687 // ext(trunc(opnd)) --> ext(opnd)
3688 if (!isa<TruncInst>(Inst))
3691 Value *OpndVal = Inst->getOperand(0);
3692 // Check if we can use this operand in the extension.
3693 // If the type is larger than the result type of the extension, we cannot.
3694 if (!OpndVal->getType()->isIntegerTy() ||
3695 OpndVal->getType()->getIntegerBitWidth() >
3696 ConsideredExtType->getIntegerBitWidth())
3699 // If the operand of the truncate is not an instruction, we will not have
3700 // any information on the dropped bits.
3701 // (Actually we could for constant but it is not worth the extra logic).
3702 Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
3706 // Check if the source of the type is narrow enough.
3707 // I.e., check that the trunc just drops extended bits of the same kind of extension.
3709 // #1 get the type of the operand and check the kind of the extended bits.
3710 const Type *OpndType;
3711 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
3712 if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
3713 OpndType = It->second.getPointer();
3714 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
3715 OpndType = Opnd->getOperand(0)->getType();
3719 // #2 check that the truncate just drops extended bits.
3720 return Inst->getType()->getIntegerBitWidth() >=
3721 OpndType->getIntegerBitWidth();
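// The final width test above, restated on plain bit widths: ext(trunc(opnd))
// may use opnd directly only when the truncate removes nothing but bits that
// were (re)introduced by a prior extension of the same kind.
// truncOnlyDropsExtendedBits is an illustrative helper, not part of the pass.
#include <cassert>

// OrigBits: width of the value before it was sign/zero extended.
// TruncBits: width produced by the truncate feeding the outer extension.
inline bool truncOnlyDropsExtendedBits(unsigned OrigBits, unsigned TruncBits) {
  return TruncBits >= OrigBits;
}

inline void truncDropExample() {
  // zext i8 %a to i32 ; trunc i32 %z to i16 ; zext i16 %t to i64
  // The trunc keeps all 8 original bits, so the chain can fold to zext i8 -> i64.
  assert(truncOnlyDropsExtendedBits(/*OrigBits=*/8, /*TruncBits=*/16));
  // A trunc to i4 would discard real payload bits, so no fold.
  assert(!truncOnlyDropsExtendedBits(/*OrigBits=*/8, /*TruncBits=*/4));
}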
3724 TypePromotionHelper::Action TypePromotionHelper::getAction(
3725 Instruction *Ext, const SetOfInstrs &InsertedInsts,
3726 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
3727 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
3728 "Unexpected instruction type");
3729 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
3730 Type *ExtTy = Ext->getType();
3731 bool IsSExt = isa<SExtInst>(Ext);
3732 // If the operand of the extension is not an instruction, we cannot get through it.
3734 // If it is an instruction, check that we can get through it.
3735 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
3738 // Do not promote if the operand has been added by codegenprepare.
3739 // Otherwise, it means we are undoing an optimization that is likely to be
3740 // redone, thus causing potential infinite loop.
3741 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
3744 // SExt or Trunc instructions.
3745 // Return the related handler.
3746 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
3747 isa<ZExtInst>(ExtOpnd))
3748 return promoteOperandForTruncAndAnyExt;
3750 // Regular instruction.
3751 // Abort early if we will have to insert non-free instructions.
3752 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
3754 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
3757 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
3758 llvm::Instruction *SExt, TypePromotionTransaction &TPT,
3759 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3760 SmallVectorImpl<Instruction *> *Exts,
3761 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
3762 // By construction, the operand of SExt is an instruction. Otherwise we cannot
3763 // get through it and this method should not be called.
3764 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
3765 Value *ExtVal = SExt;
3766 bool HasMergedNonFreeExt = false;
3767 if (isa<ZExtInst>(SExtOpnd)) {
3768 // Replace s|zext(zext(opnd)) => zext(opnd).
3770 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
3772 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
3773 TPT.replaceAllUsesWith(SExt, ZExt);
3774 TPT.eraseInstruction(SExt);
3777 // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) => z|sext(opnd).
3779 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
3781 CreatedInstsCost = 0;
3783 // Remove dead code.
3784 if (SExtOpnd->use_empty())
3785 TPT.eraseInstruction(SExtOpnd);
3787 // Check if the extension is still needed.
3788 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
3789 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
3792 Exts->push_back(ExtInst);
3793 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
3798 // At this point we have: ext ty opnd to ty.
3799 // Reassign the uses of ExtInst to the opnd and remove ExtInst.
3800 Value *NextVal = ExtInst->getOperand(0);
3801 TPT.eraseInstruction(ExtInst, NextVal);
3805 Value *TypePromotionHelper::promoteOperandForOther(
3806 Instruction *Ext, TypePromotionTransaction &TPT,
3807 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
3808 SmallVectorImpl<Instruction *> *Exts,
3809 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
3811 // By construction, the operand of Ext is an instruction. Otherwise we cannot
3812 // get through it and this method should not be called.
3813 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
3814 CreatedInstsCost = 0;
3815 if (!ExtOpnd->hasOneUse()) {
3816 // ExtOpnd will be promoted.
3817 // All its uses except Ext will need to use a truncated value of the
3818 // promoted version.
3819 // Create the truncate now.
3820 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
3821 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
3822 ITrunc->removeFromParent();
3823 // Insert it just after the definition.
3824 ITrunc->insertAfter(ExtOpnd);
3826 Truncs->push_back(ITrunc);
3829 TPT.replaceAllUsesWith(ExtOpnd, Trunc);
3830 // Restore the operand of Ext (which has been replaced by the previous call
3831 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
3832 TPT.setOperand(Ext, 0, ExtOpnd);
3835 // Get through the Instruction:
3836 // 1. Update its type.
3837 // 2. Replace the uses of Ext by Inst.
3838 // 3. Extend each operand that needs to be extended.
3840 // Remember the original type of the instruction before promotion.
3841 // This is useful to know that the high bits are sign extended bits.
3842 PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
3843 ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
3845 TPT.mutateType(ExtOpnd, Ext->getType());
3847 TPT.replaceAllUsesWith(Ext, ExtOpnd);
3849 Instruction *ExtForOpnd = Ext;
3851 DEBUG(dbgs() << "Propagate Ext to operands\n");
3852 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
3854 DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
3855 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
3856 !shouldExtOperand(ExtOpnd, OpIdx)) {
3857 DEBUG(dbgs() << "No need to propagate\n");
3860 // Check if we can statically extend the operand.
3861 Value *Opnd = ExtOpnd->getOperand(OpIdx);
3862 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
3863 DEBUG(dbgs() << "Statically extend\n");
3864 unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
3865 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
3866 : Cst->getValue().zext(BitWidth);
3867 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
3871 // UndefValues are typed, so we have to statically sign extend them.
3871 if (isa<UndefValue>(Opnd)) {
3872 DEBUG(dbgs() << "Statically extend\n");
3873 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
3877 // Otherwise we have to explicitly sign extend the operand.
3878 // Check if Ext was reused to extend an operand.
3880 // If yes, create a new one.
3881 DEBUG(dbgs() << "More operands to ext\n");
3882 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
3883 : TPT.createZExt(Ext, Opnd, Ext->getType());
3884 if (!isa<Instruction>(ValForExtOpnd)) {
3885 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
3888 ExtForOpnd = cast<Instruction>(ValForExtOpnd);
3891 Exts->push_back(ExtForOpnd);
3892 TPT.setOperand(ExtForOpnd, 0, Opnd);
3894 // Move the sign extension before the insertion point.
3895 TPT.moveBefore(ExtForOpnd, ExtOpnd);
3896 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
3897 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
3898 // If more sext are required, new instructions will have to be created.
3899 ExtForOpnd = nullptr;
3901 if (ExtForOpnd == Ext) {
3902 DEBUG(dbgs() << "Extension is useless now\n");
3903 TPT.eraseInstruction(Ext);
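// The constant-operand branch above extends immediates at compile time (via
// APInt::sext/zext) rather than emitting a new ext instruction. On plain
// integers the sign/zero distinction looks like this; extendImm8To32 is an
// illustrative helper, not part of the pass.
#include <cassert>
#include <cstdint>

inline int32_t extendImm8To32(uint8_t Imm, bool IsSExt) {
  return IsSExt ? static_cast<int32_t>(static_cast<int8_t>(Imm))
                : static_cast<int32_t>(Imm);
}

inline void staticExtendExample() {
  assert(extendImm8To32(0xFF, /*IsSExt=*/true) == -1);   // sext: 0xFF -> -1
  assert(extendImm8To32(0xFF, /*IsSExt=*/false) == 255); // zext: 0xFF -> 255
}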
3908 /// Check whether or not promoting an instruction to a wider type is profitable.
3909 /// \p NewCost gives the cost of the extension instructions created by the promotion.
3911 /// \p OldCost gives the cost of the extension instructions before the promotion,
3912 /// plus the number of instructions that have been
3913 /// matched in the addressing mode during the promotion.
3914 /// \p PromotedOperand is the value that has been promoted.
3915 /// \return True if the promotion is profitable, false otherwise.
3916 bool AddressingModeMatcher::isPromotionProfitable(
3917 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
3918 DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
3919 // The cost of the new extensions is greater than the cost of the
3920 // old extension plus what we folded.
3921 // This is not profitable.
3922 if (NewCost > OldCost)
3924 if (NewCost < OldCost)
3926 // The promotion is neutral, but it may help fold the sign extension into
3927 // loads, for instance.
3928 // Check that we did not create an illegal instruction.
3929 return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
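// The decision above, in isolation: a strictly cheaper promotion wins, a
// strictly more expensive one loses, and a tie falls back to a legality check
// on the promoted instruction. promotionPaysOff and PromotedIsLegal are
// illustrative stand-ins for this method and isPromotedInstructionLegal.
inline bool promotionPaysOff(unsigned NewCost, unsigned OldCost,
                             bool PromotedIsLegal) {
  if (NewCost > OldCost)
    return false; // costs more than the old extension plus what was folded
  if (NewCost < OldCost)
    return true;  // strictly cheaper
  return PromotedIsLegal; // neutral: accept only if still legal
}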
3932 /// Given an instruction or constant expr, see if we can fold the operation
3933 /// into the addressing mode. If so, update the addressing mode and return
3934 /// true, otherwise return false without modifying AddrMode.
3935 /// If \p MovedAway is not NULL, it contains the information of whether or
3936 /// not AddrInst has to be folded into the addressing mode on success.
3937 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
3938 /// mode because it has been moved away.
3939 /// Thus AddrInst must not be added to the matched instructions.
3940 /// This state can happen when AddrInst is a sext, since it may be moved away.
3941 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
3942 /// not be referenced anymore.
3943 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
3946 // Avoid exponential behavior on extremely deep expression trees.
3947 if (Depth >= 5) return false;
3949 // By default, all matched instructions stay in place.
3954 case Instruction::PtrToInt:
3955 // PtrToInt is always a noop, as we know that the int type is pointer sized.
3956 return matchAddr(AddrInst->getOperand(0), Depth);
3957 case Instruction::IntToPtr: {
3958 auto AS = AddrInst->getType()->getPointerAddressSpace();
3959 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
3960 // This inttoptr is a no-op if the integer type is pointer sized.
3961 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
3962 return matchAddr(AddrInst->getOperand(0), Depth);
3965 case Instruction::BitCast:
3966 // BitCast is always a noop, and we can handle it as long as it is
3967 // int->int or pointer->pointer (we don't want int<->fp or something).
3968 if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
3969 AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
3970 // Don't touch identity bitcasts. These were probably put here by LSR,
3971 // and we don't want to mess around with them. Assume it knows what it is doing.
3973 AddrInst->getOperand(0)->getType() != AddrInst->getType())
3974 return matchAddr(AddrInst->getOperand(0), Depth);
3976 case Instruction::AddrSpaceCast: {
3978 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
3979 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
3980 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3981 return matchAddr(AddrInst->getOperand(0), Depth);
3984 case Instruction::Add: {
3985 // Check to see if we can merge in the RHS then the LHS. If so, we win.
3986 ExtAddrMode BackupAddrMode = AddrMode;
3987 unsigned OldSize = AddrModeInsts.size();
3988 // Start a transaction at this point.
3989 // The LHS may match but not the RHS.
3990 // Therefore, we need a higher level restoration point to undo partially
3991 // matched operation.
3992 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3993 TPT.getRestorationPoint();
3995 if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
3996 matchAddr(AddrInst->getOperand(0), Depth+1))
3999 // Restore the old addr mode info.
4000 AddrMode = BackupAddrMode;
4001 AddrModeInsts.resize(OldSize);
4002 TPT.rollback(LastKnownGood);
4004 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
4005 if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
4006 matchAddr(AddrInst->getOperand(1), Depth+1))
4009 // Otherwise we definitely can't merge the ADD in.
4010 AddrMode = BackupAddrMode;
4011 AddrModeInsts.resize(OldSize);
4012 TPT.rollback(LastKnownGood);
4015 //case Instruction::Or:
4016 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4018 case Instruction::Mul:
4019 case Instruction::Shl: {
4020 // Can only handle X*C and X << C.
4021 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4024 int64_t Scale = RHS->getSExtValue();
4025 if (Opcode == Instruction::Shl)
4026 Scale = 1LL << Scale;
4028 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4030 case Instruction::GetElementPtr: {
4031 // Scan the GEP. We check to see if it contains constant offsets and at most
4032 // one variable offset.
4033 int VariableOperand = -1;
4034 unsigned VariableScale = 0;
4036 int64_t ConstantOffset = 0;
4037 gep_type_iterator GTI = gep_type_begin(AddrInst);
4038 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4039 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
4040 const StructLayout *SL = DL.getStructLayout(STy);
4042 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4043 ConstantOffset += SL->getElementOffset(Idx);
4045 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
4046 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4047 ConstantOffset += CI->getSExtValue()*TypeSize;
4048 } else if (TypeSize) { // Scales of zero don't do anything.
4049 // We only allow one variable index at the moment.
4050 if (VariableOperand != -1)
4053 // Remember the variable index.
4054 VariableOperand = i;
4055 VariableScale = TypeSize;
4060 // A common case is for the GEP to only do a constant offset. In this case,
4061 // just add it to the disp field and check validity.
4062 if (VariableOperand == -1) {
4063 AddrMode.BaseOffs += ConstantOffset;
4064 if (ConstantOffset == 0 ||
4065 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
4066 // Check to see if we can fold the base pointer in too.
4067 if (matchAddr(AddrInst->getOperand(0), Depth+1))
4070 AddrMode.BaseOffs -= ConstantOffset;
4074 // Save the valid addressing mode in case we can't match.
4075 ExtAddrMode BackupAddrMode = AddrMode;
4076 unsigned OldSize = AddrModeInsts.size();
4078 // See if the scale and offset amount is valid for this target.
4079 AddrMode.BaseOffs += ConstantOffset;
4081 // Match the base operand of the GEP.
4082 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
4083 // If it couldn't be matched, just stuff the value in a register.
4084 if (AddrMode.HasBaseReg) {
4085 AddrMode = BackupAddrMode;
4086 AddrModeInsts.resize(OldSize);
4089 AddrMode.HasBaseReg = true;
4090 AddrMode.BaseReg = AddrInst->getOperand(0);
4093 // Match the remaining variable portion of the GEP.
4094 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
4096 // If it couldn't be matched, try stuffing the base into a register
4097 // instead of matching it, and retrying the match of the scale.
4098 AddrMode = BackupAddrMode;
4099 AddrModeInsts.resize(OldSize);
4100 if (AddrMode.HasBaseReg)
4102 AddrMode.HasBaseReg = true;
4103 AddrMode.BaseReg = AddrInst->getOperand(0);
4104 AddrMode.BaseOffs += ConstantOffset;
4105 if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
4106 VariableScale, Depth)) {
4107 // If even that didn't work, bail.
4108 AddrMode = BackupAddrMode;
4109 AddrModeInsts.resize(OldSize);
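// The index scan in the GetElementPtr case above, reduced to arithmetic:
// every constant index contributes index * allocation size (or a struct field
// offset), and at most one non-constant index may remain, remembered with its
// scale. scanGEPIndices and the flat ElemSize vector are an illustrative
// simplification of gep_type_iterator / StructLayout, not the pass's code.
#include <cstdint>
#include <utility>
#include <vector>

struct MiniGEPScan {
  int64_t ConstantOffset = 0;
  int VariableOperand = -1;   // position of the single variable index, if any
  uint64_t VariableScale = 0; // its element size
};

// Idx[i] = {IsConstant, Value}; ElemSize[i] = allocation size scaled by Idx[i].
inline bool scanGEPIndices(const std::vector<std::pair<bool, int64_t>> &Idx,
                           const std::vector<uint64_t> &ElemSize,
                           MiniGEPScan &Out) {
  for (size_t i = 0, e = Idx.size(); i != e; ++i) {
    if (Idx[i].first) {
      Out.ConstantOffset += Idx[i].second * static_cast<int64_t>(ElemSize[i]);
    } else if (ElemSize[i]) { // scales of zero contribute nothing
      if (Out.VariableOperand != -1)
        return false;         // only one variable index is supported
      Out.VariableOperand = static_cast<int>(i);
      Out.VariableScale = ElemSize[i];
    }
  }
  return true;
}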
4116 case Instruction::SExt:
4117 case Instruction::ZExt: {
4118 Instruction *Ext = dyn_cast<Instruction>(AddrInst);
4122 // Try to move this ext out of the way of the addressing mode.
4123 // Ask for a method for doing so.
4124 TypePromotionHelper::Action TPH =
4125 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
4129 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4130 TPT.getRestorationPoint();
4131 unsigned CreatedInstsCost = 0;
4132 unsigned ExtCost = !TLI.isExtFree(Ext);
4133 Value *PromotedOperand =
4134 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
4135 // SExt has been moved away.
4136 // Thus either it will be rematched later in the recursive calls or it is
4137 // gone. Anyway, we must not fold it into the addressing mode at this point.
4141 // addr = gep base, idx
4143 // promotedOpnd = ext opnd <- no match here
4144 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
4145 // addr = gep base, op <- match
4149 assert(PromotedOperand &&
4150 "TypePromotionHelper should have filtered out those cases");
4152 ExtAddrMode BackupAddrMode = AddrMode;
4153 unsigned OldSize = AddrModeInsts.size();
4155 if (!matchAddr(PromotedOperand, Depth) ||
4156 // The total of the new cost is equal to the cost of the created
4158 // The total of the old cost is equal to the cost of the extension plus
4159 // what we have saved in the addressing mode.
4160 !isPromotionProfitable(CreatedInstsCost,
4161 ExtCost + (AddrModeInsts.size() - OldSize),
4163 AddrMode = BackupAddrMode;
4164 AddrModeInsts.resize(OldSize);
4165 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
4166 TPT.rollback(LastKnownGood);
4175 /// If we can, try to add the value of 'Addr' into the current addressing mode.
4176 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
4177 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
4180 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
4181 // Start a transaction at this point that we will roll back if the matching fails.
4183 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4184 TPT.getRestorationPoint();
4185 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
4186 // Fold in immediates if legal for the target.
4187 AddrMode.BaseOffs += CI->getSExtValue();
4188 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4190 AddrMode.BaseOffs -= CI->getSExtValue();
4191 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
4192 // If this is a global variable, try to fold it into the addressing mode.
4193 if (!AddrMode.BaseGV) {
4194 AddrMode.BaseGV = GV;
4195 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4197 AddrMode.BaseGV = nullptr;
4199 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
4200 ExtAddrMode BackupAddrMode = AddrMode;
4201 unsigned OldSize = AddrModeInsts.size();
4203 // Check to see if it is possible to fold this operation.
4204 bool MovedAway = false;
4205 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
4206 // This instruction may have been moved away. If so, there is nothing
4210 // Okay, it's possible to fold this. Check to see if it is actually
4211 // *profitable* to do so. We use a simple cost model to avoid increasing
4212 // register pressure too much.
4213 if (I->hasOneUse() ||
4214 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
4215 AddrModeInsts.push_back(I);
4219 // It isn't profitable to do this, roll back.
4220 //cerr << "NOT FOLDING: " << *I;
4221 AddrMode = BackupAddrMode;
4222 AddrModeInsts.resize(OldSize);
4223 TPT.rollback(LastKnownGood);
4225 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
4226 if (matchOperationAddr(CE, CE->getOpcode(), Depth))
4228 TPT.rollback(LastKnownGood);
4229 } else if (isa<ConstantPointerNull>(Addr)) {
4230 // Null pointer gets folded without affecting the addressing mode.
4234 // Worst case, the target should support [reg] addressing modes. :)
4235 if (!AddrMode.HasBaseReg) {
4236 AddrMode.HasBaseReg = true;
4237 AddrMode.BaseReg = Addr;
4238 // Still check for legality in case the target supports [imm] but not [i+r].
4239 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4241 AddrMode.HasBaseReg = false;
4242 AddrMode.BaseReg = nullptr;
4245 // If the base register is already taken, see if we can do [r+r].
4246 if (AddrMode.Scale == 0) {
4248 AddrMode.ScaledReg = Addr;
4249 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4252 AddrMode.ScaledReg = nullptr;
4255 TPT.rollback(LastKnownGood);
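// The fallback at the end of matchAddr, in miniature: first try to park Addr
// in the base register, then, if that slot is taken, as a scaled register
// with scale 1, undoing each attempt that the target rejects. TinyAddrMode
// and the isLegal callback are illustrative stand-ins for ExtAddrMode and
// TLI.isLegalAddressingMode; this sketch shows only the try/roll-back shape.
struct TinyAddrMode {
  bool HasBaseReg = false;
  const void *BaseReg = nullptr;
  long long Scale = 0;
  const void *ScaledReg = nullptr;
};

template <typename LegalFn>
inline bool tryBareRegister(TinyAddrMode &AM, const void *Addr,
                            LegalFn isLegal) {
  if (!AM.HasBaseReg) {
    AM.HasBaseReg = true;
    AM.BaseReg = Addr;
    if (isLegal(AM))
      return true;          // [reg] (plus whatever was already matched)
    AM.HasBaseReg = false;  // roll back the attempt
    AM.BaseReg = nullptr;
  }
  if (AM.Scale == 0) {
    AM.Scale = 1;
    AM.ScaledReg = Addr;
    if (isLegal(AM))
      return true;          // [r + r] style mode
    AM.Scale = 0;
    AM.ScaledReg = nullptr;
  }
  return false;
}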
4259 /// Check to see if all uses of OpVal by the specified inline asm call are due
4260 /// to memory operands. If so, return true, otherwise return false.
4261 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
4262 const TargetMachine &TM) {
4263 const Function *F = CI->getParent()->getParent();
4264 const TargetLowering *TLI = TM.getSubtargetImpl(*F)->getTargetLowering();
4265 const TargetRegisterInfo *TRI = TM.getSubtargetImpl(*F)->getRegisterInfo();
4266 TargetLowering::AsmOperandInfoVector TargetConstraints =
4267 TLI->ParseConstraints(F->getParent()->getDataLayout(), TRI,
4268 ImmutableCallSite(CI));
4269 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
4270 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
4272 // Compute the constraint code and ConstraintType to use.
4273 TLI->ComputeConstraintToUse(OpInfo, SDValue());
4275 // If this asm operand is our Value*, and if it isn't an indirect memory
4276 // operand, we can't fold it!
4277 if (OpInfo.CallOperandVal == OpVal &&
4278 (OpInfo.ConstraintType != TargetLowering::C_Memory ||
4279 !OpInfo.isIndirect))
4286 /// Recursively walk all the uses of I until we find a memory use.
4287 /// If we find an obviously non-foldable instruction, return true.
4288 /// Add the ultimately found memory instructions to MemoryUses.
4289 static bool FindAllMemoryUses(
4291 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
4292 SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetMachine &TM) {
4293 // If we already considered this instruction, we're done.
4294 if (!ConsideredInsts.insert(I).second)
4297 // If this is an obviously unfoldable instruction, bail out.
4298 if (!MightBeFoldableInst(I))
4301 // Loop over all the uses, recursively processing them.
4302 for (Use &U : I->uses()) {
4303 Instruction *UserI = cast<Instruction>(U.getUser());
4305 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
4306 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
4310 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
4311 unsigned opNo = U.getOperandNo();
4312 if (opNo == 0) return true; // Storing addr, not into addr.
4313 MemoryUses.push_back(std::make_pair(SI, opNo));
4317 if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
4318 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
4319 if (!IA) return true;
4321 // If this is a memory operand, we're cool, otherwise bail out.
4322 if (!IsOperandAMemoryOperand(CI, IA, I, TM))
4327 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TM))
4334 /// Return true if Val is already known to be live at the use site that we're
4335 /// folding it into. If so, there is no cost to include it in the addressing
4336 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
4337 /// instruction already.
4338 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
4339 Value *KnownLive2) {
4340 // If Val is either of the known-live values, we know it is live!
4341 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
4344 // All values other than instructions and arguments (e.g. constants) are live.
4345 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
4347 // If Val is a constant sized alloca in the entry block, it is live, this is
4348 // true because it is just a reference to the stack/frame pointer, which is
4349 // live for the whole function.
4350 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
4351 if (AI->isStaticAlloca())
4354 // Check to see if this value is already used in the memory instruction's
4355 // block. If so, it's already live into the block at the very least, so we
4356 // can reasonably fold it.
4357 return Val->isUsedInBasicBlock(MemoryInst->getParent());
4360 /// It is possible for the addressing mode of the machine to fold the specified
4361 /// instruction into a load or store that ultimately uses it.
4362 /// However, the specified instruction has multiple uses.
4363 /// Given this, it may actually increase register pressure to fold it
4364 /// into the load. For example, consider this code:
4368 /// use(Y) -> nonload/store
4372 /// In this case, Y has multiple uses, and can be folded into the load of Z
4373 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
4374 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
4375 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
4376 /// number of computations either.
4378 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
4379 /// X was live across 'load Z' for other reasons, we actually *would* want to
4380 /// fold the addressing mode in the Z case. This would make Y die earlier.
4381 bool AddressingModeMatcher::
4382 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
4383 ExtAddrMode &AMAfter) {
4384 if (IgnoreProfitability) return true;
4386 // AMBefore is the addressing mode before this instruction was folded into it,
4387 // and AMAfter is the addressing mode after the instruction was folded. Get
4388 // the set of registers referenced by AMAfter and subtract out those
4389 // referenced by AMBefore: this is the set of values which folding in this
4390 // address extends the lifetime of.
4392 // Note that there are only two potential values being referenced here,
4393 // BaseReg and ScaleReg (global addresses are always available, as are any
4394 // folded immediates).
4395 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
4397 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
4398 // lifetime wasn't extended by adding this instruction.
4399 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4401 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4402 ScaledReg = nullptr;
4404 // If folding this instruction (and it's subexprs) didn't extend any live
4405 // ranges, we're ok with it.
4406 if (!BaseReg && !ScaledReg)
4409 // If all uses of this instruction are ultimately load/store/inlineasm's,
4410 // check to see if their addressing modes will include this instruction. If
4411 // so, we can fold it into all uses, so it doesn't matter if it has multiple
4413 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
4414 SmallPtrSet<Instruction*, 16> ConsideredInsts;
4415 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TM))
4416 return false; // Has a non-memory, non-foldable use!
4418 // Now that we know that all uses of this instruction are part of a chain of
4419 // computation involving only operations that could theoretically be folded
4420 // into a memory use, loop over each of these uses and see if they could
4421 // *actually* fold the instruction.
4422 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
4423 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
4424 Instruction *User = MemoryUses[i].first;
4425 unsigned OpNo = MemoryUses[i].second;
4427 // Get the access type of this use. If the use isn't a pointer, we don't
4428 // know what it accesses.
4429 Value *Address = User->getOperand(OpNo);
4430 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
4433 Type *AddressAccessTy = AddrTy->getElementType();
4434 unsigned AS = AddrTy->getAddressSpace();
4436 // Do a match against the root of this address, ignoring profitability. This
4437 // will tell us if the addressing mode for the memory operation will
4438 // *actually* cover the shared instruction.
4440 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4441 TPT.getRestorationPoint();
4442 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TM, AddressAccessTy, AS,
4443 MemoryInst, Result, InsertedInsts,
4444 PromotedInsts, TPT);
4445 Matcher.IgnoreProfitability = true;
4446 bool Success = Matcher.matchAddr(Address, 0);
4447 (void)Success; assert(Success && "Couldn't select *anything*?");
4449 // The match was only done to check profitability; the changes made are not
4450 // part of the original matcher. Therefore, they should be dropped,
4451 // otherwise the original matcher will not present the right state.
4452 TPT.rollback(LastKnownGood);
4454 // If the match didn't cover I, then it won't be shared by it.
4455 if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
4456 I) == MatchedAddrModeInsts.end())
4459 MatchedAddrModeInsts.clear();
4465 } // end anonymous namespace
4467 /// Return true if the specified values are defined in a
4468 /// different basic block than BB.
4469 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
4470 if (Instruction *I = dyn_cast<Instruction>(V))
4471 return I->getParent() != BB;
4475 /// Load and Store Instructions often have addressing modes that can do
4476 /// significant amounts of computation. As such, instruction selection will try
4477 /// to get the load or store to do as much computation as possible for the
4478 /// program. The problem is that isel can only see within a single block. As
4479 /// such, we sink as much legal addressing mode work into the block as possible.
4481 /// This method is used to optimize both load/store and inline asms with memory
4483 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
4484 Type *AccessTy, unsigned AddrSpace) {
4487 // Try to collapse single-value PHI nodes. This is necessary to undo
4488 // unprofitable PRE transformations.
4489 SmallVector<Value*, 8> worklist;
4490 SmallPtrSet<Value*, 16> Visited;
4491 worklist.push_back(Addr);
4493 // Use a worklist to iteratively look through PHI nodes, and ensure that
4494 // the addressing mode obtained from the non-PHI roots of the graph
4496 Value *Consensus = nullptr;
4497 unsigned NumUsesConsensus = 0;
4498 bool IsNumUsesConsensusValid = false;
4499 SmallVector<Instruction*, 16> AddrModeInsts;
4500 ExtAddrMode AddrMode;
4501 TypePromotionTransaction TPT;
4502 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4503 TPT.getRestorationPoint();
4504 while (!worklist.empty()) {
4505 Value *V = worklist.back();
4506 worklist.pop_back();
4508 // Break use-def graph loops.
4509 if (!Visited.insert(V).second) {
4510 Consensus = nullptr;
4514 // For a PHI node, push all of its incoming values.
4515 if (PHINode *P = dyn_cast<PHINode>(V)) {
4516 for (Value *IncValue : P->incoming_values())
4517 worklist.push_back(IncValue);
4521 // For non-PHIs, determine the addressing mode being computed.
4522 SmallVector<Instruction*, 16> NewAddrModeInsts;
4523 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
4524 V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TM,
4525 InsertedInsts, PromotedInsts, TPT);
4527 // This check is broken into two cases with very similar code to avoid using
4528 // getNumUses() as much as possible. Some values have a lot of uses, so
4529 // calling getNumUses() unconditionally caused a significant compile-time regression.
4533 AddrMode = NewAddrMode;
4534 AddrModeInsts = NewAddrModeInsts;
4536 } else if (NewAddrMode == AddrMode) {
4537 if (!IsNumUsesConsensusValid) {
4538 NumUsesConsensus = Consensus->getNumUses();
4539 IsNumUsesConsensusValid = true;
4542 // Ensure that the obtained addressing mode is equivalent to that obtained
4543 // for all other roots of the PHI traversal. Also, when choosing one
4544 // such root as representative, select the one with the most uses in order
4545 // to keep the cost modeling heuristics in AddressingModeMatcher applicable.
4547 unsigned NumUses = V->getNumUses();
4548 if (NumUses > NumUsesConsensus) {
4550 NumUsesConsensus = NumUses;
4551 AddrModeInsts = NewAddrModeInsts;
4556 Consensus = nullptr;
4560 // If the addressing mode couldn't be determined, or if multiple different
4561 // ones were determined, bail out now.
4563 TPT.rollback(LastKnownGood);
4568 // Check to see if any of the instructions subsumed by this addr mode are
4569 // non-local to I's BB.
4570 bool AnyNonLocal = false;
4571 for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
4572 if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
4578 // If all the instructions matched are already in this BB, don't do anything.
4580 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
4584 // Insert this computation right after this user. Since our caller is
4585 // scanning from the top of the BB to the bottom, reuses of the expr are
4586 // guaranteed to happen later.
4587 IRBuilder<> Builder(MemoryInst);
4589 // Now that we have determined the addressing expression we want to use, we know
4590 // that we have to sink it into this block. Check to see if we have already
4591 // done this for some other load/store instr in this block. If so, reuse the computation.
4593 Value *&SunkAddr = SunkAddrs[Addr];
4595 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
4596 << *MemoryInst << "\n");
4597 if (SunkAddr->getType() != Addr->getType())
4598 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
4599 } else if (AddrSinkUsingGEPs ||
4600 (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
4601 TM->getSubtargetImpl(*MemoryInst->getParent()->getParent())
4603 // By default, we use the GEP-based method when AA is used later. This
4604 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
4605 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
4606 << *MemoryInst << "\n");
4607 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
4608 Value *ResultPtr = nullptr, *ResultIndex = nullptr;
4610 // First, find the pointer.
4611 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
4612 ResultPtr = AddrMode.BaseReg;
4613 AddrMode.BaseReg = nullptr;
4616 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
4617 // We can't add more than one pointer together, nor can we scale a
4618 // pointer (both of which seem meaningless).
4619 if (ResultPtr || AddrMode.Scale != 1)
4622 ResultPtr = AddrMode.ScaledReg;
4626 if (AddrMode.BaseGV) {
4630 ResultPtr = AddrMode.BaseGV;
4633 // If the real base value actually came from an inttoptr, then the matcher
4634 // will look through it and provide only the integer value. In that case, use it here.
4636 if (!ResultPtr && AddrMode.BaseReg) {
4638 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr");
4639 AddrMode.BaseReg = nullptr;
4640 } else if (!ResultPtr && AddrMode.Scale == 1) {
4642 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr");
4647 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
4648 SunkAddr = Constant::getNullValue(Addr->getType());
4649 } else if (!ResultPtr) {
4653 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
4654 Type *I8Ty = Builder.getInt8Ty();
4656 // Start with the base register. Do this first so that subsequent address
4657 // matching finds it last, which will prevent it from trying to match it
4658 // as the scaled value in case it happens to be a mul. That would be
4659 // problematic if we've sunk a different mul for the scale, because then
4660 // we'd end up sinking both muls.
4661 if (AddrMode.BaseReg) {
4662 Value *V = AddrMode.BaseReg;
4663 if (V->getType() != IntPtrTy)
4664 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
4669 // Add the scale value.
4670 if (AddrMode.Scale) {
4671 Value *V = AddrMode.ScaledReg;
4672 if (V->getType() == IntPtrTy) {
4674 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
4675 cast<IntegerType>(V->getType())->getBitWidth()) {
4676 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
4678 // It is only safe to sign extend the BaseReg if we know that the math
4679 // required to create it did not overflow before we extend it. Since
4680 // the original IR value was tossed in favor of a constant back when
4681 // the AddrMode was created we need to bail out gracefully if widths
4682 // do not match instead of extending it.
4683 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
4684 if (I && (ResultIndex != AddrMode.BaseReg))
4685 I->eraseFromParent();
4689 if (AddrMode.Scale != 1)
4690 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
4693 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
4698 // Add in the Base Offset if present.
4699 if (AddrMode.BaseOffs) {
4700 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
4702 // We need to add this separately from the scale above to help with
4703 // SDAG consecutive load/store merging.
4704 if (ResultPtr->getType() != I8PtrTy)
4705 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
4706 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
4713 SunkAddr = ResultPtr;
4715 if (ResultPtr->getType() != I8PtrTy)
4716 ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);
4717 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
4720 if (SunkAddr->getType() != Addr->getType())
4721 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
4724 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
4725 << *MemoryInst << "\n");
4726 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
4727 Value *Result = nullptr;
4729 // Start with the base register. Do this first so that subsequent address
4730 // matching finds it last, which will prevent it from trying to match it
4731 // as the scaled value in case it happens to be a mul. That would be
4732 // problematic if we've sunk a different mul for the scale, because then
4733 // we'd end up sinking both muls.
4734 if (AddrMode.BaseReg) {
4735 Value *V = AddrMode.BaseReg;
4736 if (V->getType()->isPointerTy())
4737 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
4738 if (V->getType() != IntPtrTy)
4739 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
4743 // Add the scale value.
4744 if (AddrMode.Scale) {
4745 Value *V = AddrMode.ScaledReg;
4746 if (V->getType() == IntPtrTy) {
4748 } else if (V->getType()->isPointerTy()) {
4749 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
4750 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
4751 cast<IntegerType>(V->getType())->getBitWidth()) {
4752 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
4754 // It is only safe to sign extend the BaseReg if we know that the math
4755 // required to create it did not overflow before we extend it. Since
4756 // the original IR value was tossed in favor of a constant back when
4757 // the AddrMode was created we need to bail out gracefully if widths
4758 // do not match instead of extending it.
4759 Instruction *I = dyn_cast_or_null<Instruction>(Result);
4760 if (I && (Result != AddrMode.BaseReg))
4761 I->eraseFromParent();
4764 if (AddrMode.Scale != 1)
4765 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
4768 Result = Builder.CreateAdd(Result, V, "sunkaddr");
4773 // Add in the BaseGV if present.
4774 if (AddrMode.BaseGV) {
4775 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
4777 Result = Builder.CreateAdd(Result, V, "sunkaddr");
4782 // Add in the Base Offset if present.
4783 if (AddrMode.BaseOffs) {
4784 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
4786 Result = Builder.CreateAdd(Result, V, "sunkaddr");
4792 SunkAddr = Constant::getNullValue(Addr->getType());
4794 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
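// The non-GEP path above rebuilds the address as integer arithmetic followed
// by a single inttoptr. The value being computed, shown on raw integers; in
// the pass each term is emitted with IRBuilder (CreatePtrToInt / CreateMul /
// CreateAdd) only when it is present, and rebuildSunkAddress is purely
// illustrative.
#include <cstdint>

inline uint64_t rebuildSunkAddress(uint64_t BaseReg, uint64_t ScaledReg,
                                   int64_t Scale, uint64_t BaseGV,
                                   int64_t BaseOffs) {
  uint64_t Result = 0;
  if (BaseReg)
    Result += BaseReg;                                   // base register
  if (Scale)
    Result += ScaledReg * static_cast<uint64_t>(Scale);  // scaled register
  if (BaseGV)
    Result += BaseGV;                                    // global base address
  Result += static_cast<uint64_t>(BaseOffs);             // constant displacement
  return Result; // the pass then emits: inttoptr Result to the pointer type
}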
4797 MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
4799 // If we have no uses, recursively delete the value and all dead instructions using it.
4801 if (Repl->use_empty()) {
4802 // This can cause recursive deletion, which can invalidate our iterator.
4803 // Use a WeakVH to hold onto it in case this happens.
4804 WeakVH IterHandle(&*CurInstIterator);
4805 BasicBlock *BB = CurInstIterator->getParent();
4807 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
4809 if (IterHandle != CurInstIterator.getNodePtrUnchecked()) {
4810 // If the iterator instruction was recursively deleted, start over at the
4811 // start of the block.
4812 CurInstIterator = BB->begin();
4820 /// If there are any memory operands, use optimizeMemoryInst to sink their
4821 /// address computation into the block when possible / profitable.
4822 bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
4823 bool MadeChange = false;
4825 const TargetRegisterInfo *TRI =
4826 TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
4827 TargetLowering::AsmOperandInfoVector TargetConstraints =
4828 TLI->ParseConstraints(*DL, TRI, CS);
4830 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
4831 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
4833 // Compute the constraint code and ConstraintType to use.
4834 TLI->ComputeConstraintToUse(OpInfo, SDValue());
4836 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
4837 OpInfo.isIndirect) {
4838 Value *OpVal = CS->getArgOperand(ArgNo++);
4839 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
4840 } else if (OpInfo.Type == InlineAsm::isInput)
4847 /// \brief Check if all the uses of \p Inst are equivalent (or free) zero or
4848 /// sign extensions.
4849 static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI) {
4850 assert(!Inst->use_empty() && "Input must have at least one use");
4851 const Instruction *FirstUser = cast<Instruction>(*Inst->user_begin());
4852 bool IsSExt = isa<SExtInst>(FirstUser);
4853 Type *ExtTy = FirstUser->getType();
4854 for (const User *U : Inst->users()) {
4855 const Instruction *UI = cast<Instruction>(U);
4856 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
4858 Type *CurTy = UI->getType();
4859 // Same input and output types: Same instruction after CSE.
4863 // If IsSExt is true, we are in this situation:
4865 // b = sext ty1 a to ty2
4866 // c = sext ty1 a to ty3
4867 // Assuming ty2 is shorter than ty3, this could be turned into:
4869 // b = sext ty1 a to ty2
4870 // c = sext ty2 b to ty3
4871 // However, the last sext is not free.
4875 // This is a ZExt, maybe this is free to extend from one type to another.
4876 // In that case, we would not account for a different use.
4879 if (ExtTy->getScalarType()->getIntegerBitWidth() >
4880 CurTy->getScalarType()->getIntegerBitWidth()) {
4888 if (!TLI.isZExtFree(NarrowTy, LargeTy))
4891 // All uses are the same or can be derived from one another for free.
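// The check above, reduced to widths and kinds: every user must be the same
// kind of extension; identical widths collapse by CSE, and differing widths
// are acceptable only for zero extensions that the target reports as free.
// ExtUseDesc, allUsesExtendCompatibly and ZExtIsFree are illustrative
// stand-ins for the IR walk and TLI.isZExtFree, not the pass's code.
#include <vector>

struct ExtUseDesc {
  bool IsSExt;      // sext (true) or zext (false)
  unsigned DstBits; // destination width of the extension
};

inline bool allUsesExtendCompatibly(const std::vector<ExtUseDesc> &Uses,
                                    bool ZExtIsFree) {
  if (Uses.empty())
    return false;
  const bool IsSExt = Uses.front().IsSExt;
  const unsigned FirstDst = Uses.front().DstBits;
  for (const ExtUseDesc &U : Uses) {
    if (U.IsSExt != IsSExt)
      return false;   // mixed sext/zext users
    if (U.DstBits == FirstDst)
      continue;       // same extension, folds by CSE
    if (IsSExt)
      return false;   // a sext to a different width is not free
    if (!ZExtIsFree)
      return false;   // differing zext widths need a free zext
  }
  return true;
}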
4895 /// \brief Try to form ExtLd by promoting \p Exts until they reach a
4896 /// load instruction.
4897 /// If an ext(load) can be formed, it is returned via \p LI for the load
4898 /// and \p Inst for the extension.
4899 /// Otherwise LI == nullptr and Inst == nullptr.
4900 /// When some promotion happened, \p TPT contains the proper state to revert it.
4903 /// \return true when promoting was necessary to expose the ext(load)
4904 /// opportunity, false otherwise.
4908 /// %ld = load i32* %addr
4909 /// %add = add nuw i32 %ld, 4
4910 /// %zext = zext i32 %add to i64
4914 /// %ld = load i32* %addr
4915 /// %zext = zext i32 %ld to i64
4916 /// %add = add nuw i64 %zext, 4
4918 /// Thanks to the promotion, we can match zext(load i32*) to i64.
4919 bool CodeGenPrepare::extLdPromotion(TypePromotionTransaction &TPT,
4920 LoadInst *&LI, Instruction *&Inst,
4921 const SmallVectorImpl<Instruction *> &Exts,
4922 unsigned CreatedInstsCost = 0) {
4923 // Iterate over all the extensions to see if one forms an ext(load).
4924 for (auto I : Exts) {
4925 // Check if we directly have ext(load).
4926 if ((LI = dyn_cast<LoadInst>(I->getOperand(0)))) {
4928 // No promotion happened here.
4931 // Check whether or not we want to do any promotion.
4932 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
4934 // Get the action to perform the promotion.
4935 TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
4936 I, InsertedInsts, *TLI, PromotedInsts);
4937 // Check if we can promote.
4940 // Save the current state.
4941 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4942 TPT.getRestorationPoint();
4943 SmallVector<Instruction *, 4> NewExts;
4944 unsigned NewCreatedInstsCost = 0;
4945 unsigned ExtCost = !TLI->isExtFree(I);
4947 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
4948 &NewExts, nullptr, *TLI);
4949 assert(PromotedVal &&
4950 "TypePromotionHelper should have filtered out those cases");
4952 // We would be able to merge only one extension into a load.
4953 // Therefore, if we have more than 1 new extension we heuristically
4954 // cut this search path, because it means we degrade the code quality.
4955 // With exactly 2, the transformation is neutral, because we will merge
4956 // one extension but leave one. However, we optimistically keep going,
4957 // because the new extension may be removed too.
4958 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
4959 TotalCreatedInstsCost -= ExtCost;
4960 if (!StressExtLdPromotion &&
4961 (TotalCreatedInstsCost > 1 ||
4962 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
4963 // The promotion is not profitable, rollback to the previous state.
4964 TPT.rollback(LastKnownGood);
4967 // The promotion is profitable.
4968 // Check if it exposes an ext(load).
4969 (void)extLdPromotion(TPT, LI, Inst, NewExts, TotalCreatedInstsCost);
4970 if (LI && (StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
4971 // If we have created a new extension, i.e., now we have two
4972 // extensions, we must make sure one of them is merged with
4973 // the load; otherwise we may degrade the code quality.
4974 (LI->hasOneUse() || hasSameExtUse(LI, *TLI))))
4975 // Promotion happened.
4977 // If this does not help to expose an ext(load) then, rollback.
4978 TPT.rollback(LastKnownGood);
4980 // None of the extensions can form an ext(load).
4986 /// Move a zext or sext fed by a load into the same basic block as the load,
4987 /// unless conditions are unfavorable. This allows SelectionDAG to fold the
4988 /// extend into the load.
4989 /// \p I[in/out] the extension may be modified during the process if some
4990 /// promotions apply.
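/// Illustrative example (hypothetical IR, in the style of the comments
/// above): an extension in a later block such as
///   bb0: %ld = load i16* %p
///   bb1: %ext = sext i16 %ld to i32
/// is moved next to the load in bb0 so that SelectionDAG, working one block
/// at a time, can select a single sign-extending load.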
4992 bool CodeGenPrepare::moveExtToFormExtLoad(Instruction *&I) {
4993 // Try to promote a chain of computation if it allows forming
4994 // an extended load.
4995 TypePromotionTransaction TPT;
4996 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4997 TPT.getRestorationPoint();
4998 SmallVector<Instruction *, 1> Exts;
5000 // Look for a load being extended.
5001 LoadInst *LI = nullptr;
5002 Instruction *OldExt = I;
5003 bool HasPromoted = extLdPromotion(TPT, LI, I, Exts);
5005 assert(!HasPromoted && !LI && "If we did not match any load instruction "
5006 "the code must remain the same");
5011 // If they're already in the same block, there's nothing to do.
5012 // Make the cheap checks first if we did not promote.
5013 // If we promoted, we need to check if it is indeed profitable.
5014 if (!HasPromoted && LI->getParent() == I->getParent())
5017 EVT VT = TLI->getValueType(*DL, I->getType());
5018 EVT LoadVT = TLI->getValueType(*DL, LI->getType());
5020 // If the load has other users and the truncate is not free, this probably
5021 // isn't worthwhile.
5022 if (!LI->hasOneUse() && TLI &&
5023 (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
5024 !TLI->isTruncateFree(I->getType(), LI->getType())) {
5026 TPT.rollback(LastKnownGood);
5030 // Check whether the target supports casts folded into loads.
5032 if (isa<ZExtInst>(I))
5033 LType = ISD::ZEXTLOAD;
5035 assert(isa<SExtInst>(I) && "Unexpected ext type!");
5036 LType = ISD::SEXTLOAD;
5038 if (TLI && !TLI->isLoadExtLegal(LType, VT, LoadVT)) {
5040 TPT.rollback(LastKnownGood);
5044 // Move the extend into the same block as the load, so that SelectionDAG
5047 I->removeFromParent();
5053 bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
5054 BasicBlock *DefBB = I->getParent();
5056 // If the result of a {s|z}ext and its source are both live out, rewrite all
5057 // other uses of the source with result of extension.
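// Illustrative example (hypothetical IR): if both %x and its extension are
// live out of this block,
//   bb0: %x = add i32 ...
//        %e = zext i32 %x to i64
//   bb1: use of %x
// the out-of-block use is rewritten as
//   bb1: %t = trunc i64 %e to i32
//        use of %t
// so only %e must stay live across the block boundary (the trunc is assumed
// free, as checked below).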
5058 Value *Src = I->getOperand(0);
5059 if (Src->hasOneUse())
5062 // Only do this xform if truncating is free.
5063 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
5068 // Only safe to perform the optimization if the source is also defined in this BB.
5068 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
5071 bool DefIsLiveOut = false;
5072 for (User *U : I->users()) {
5073 Instruction *UI = cast<Instruction>(U);
5075 // Figure out which BB this ext is used in.
5076 BasicBlock *UserBB = UI->getParent();
5077 if (UserBB == DefBB) continue;
5078 DefIsLiveOut = true;
5084 // Make sure none of the uses are PHI nodes.
5085 for (User *U : Src->users()) {
5086 Instruction *UI = cast<Instruction>(U);
5087 BasicBlock *UserBB = UI->getParent();
5088 if (UserBB == DefBB) continue;
5089 // Be conservative. We don't want this xform to end up introducing
5090 // reloads just before load / store instructions.
5091 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
5095 // InsertedTruncs - Only insert one trunc in each block once.
5096 DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
5098 bool MadeChange = false;
5099 for (Use &U : Src->uses()) {
5100 Instruction *User = cast<Instruction>(U.getUser());
5102 // Figure out which BB this ext is used in.
5103 BasicBlock *UserBB = User->getParent();
5104 if (UserBB == DefBB) continue;
5106 // Both src and def are live in this block. Rewrite the use.
5107 Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
5109 if (!InsertedTrunc) {
5110 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
5111 assert(InsertPt != UserBB->end());
5112 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
5113 InsertedInsts.insert(InsertedTrunc);
5116 // Replace a use of the {s|z}ext source with a use of the result.
5125 // Find loads whose uses only use some of the loaded value's bits. Add an "and"
5126 // just after the load if the target can fold this into one extload instruction,
5127 // with the hope of eliminating some of the other later "and" instructions using
5128 // the loaded value. "and"s that are made trivially redundant by the insertion
5129 // of the new "and" are removed by this function, while others (e.g. those whose
5130 // path from the load goes through a phi) are left for isel to potentially
5163 // becomes (after a call to optimizeLoadExt for each load):
5167 // x1' = and x1, 0xff
5171 // x2' = and x2, 0xff
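// (Illustrative summary, not part of the original comment: for each such
// load, a new "and" with the demanded mask, e.g.
//    %x1 = load i32 ...
//    %x1' = and %x1, 0xff
// is inserted right after the load, all uses of the load are rewritten to
// use it, and "and"s made trivially redundant are removed so isel can fold
// the mask into a single zero-extending load.)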
5178 bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
5180 if (!Load->isSimple() ||
5181 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy()))
5184 // Skip loads we've already transformed or have no reason to transform.
5185 if (Load->hasOneUse()) {
5186 User *LoadUser = *Load->user_begin();
5187 if (cast<Instruction>(LoadUser)->getParent() == Load->getParent() &&
5188 !isa<PHINode>(LoadUser))
5192 // Look at all uses of Load, looking through phis, to determine how many bits
5193 // of the loaded value are needed.
5194 SmallVector<Instruction *, 8> WorkList;
5195 SmallPtrSet<Instruction *, 16> Visited;
5196 SmallVector<Instruction *, 8> AndsToMaybeRemove;
5197 for (auto *U : Load->users())
5198 WorkList.push_back(cast<Instruction>(U));
5200 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
5201 unsigned BitWidth = LoadResultVT.getSizeInBits();
5202 APInt DemandBits(BitWidth, 0);
5203 APInt WidestAndBits(BitWidth, 0);
5205 while (!WorkList.empty()) {
5206 Instruction *I = WorkList.back();
5207 WorkList.pop_back();
5209 // Break use-def graph loops.
5210 if (!Visited.insert(I).second)
5213 // For a PHI node, push all of its users.
5214 if (auto *Phi = dyn_cast<PHINode>(I)) {
5215 for (auto *U : Phi->users())
5216 WorkList.push_back(cast<Instruction>(U));
5220 switch (I->getOpcode()) {
5221 case llvm::Instruction::And: {
5222 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
5225 APInt AndBits = AndC->getValue();
5226 DemandBits |= AndBits;
5227 // Keep track of the widest and mask we see.
5228 if (AndBits.ugt(WidestAndBits))
5229 WidestAndBits = AndBits;
5230 if (AndBits == WidestAndBits && I->getOperand(0) == Load)
5231 AndsToMaybeRemove.push_back(I);
5235 case llvm::Instruction::Shl: {
5236 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
5239 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
5240 auto ShlDemandBits = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt);
5241 DemandBits |= ShlDemandBits;
5245 case llvm::Instruction::Trunc: {
5246 EVT TruncVT = TLI->getValueType(*DL, I->getType());
5247 unsigned TruncBitWidth = TruncVT.getSizeInBits();
5248 auto TruncBits = APInt::getAllOnesValue(TruncBitWidth).zext(BitWidth);
5249 DemandBits |= TruncBits;
5258 uint32_t ActiveBits = DemandBits.getActiveBits();
5259 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
5260 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
5261 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
5262 // (and (load x) 1) is not matched as a single instruction, rather as a LDR
5263 // followed by an AND.
5264 // TODO: Look into removing this restriction by fixing backends to either
5265 // return false for isLoadExtLegal for i1 or have them select this pattern to
5266 // a single instruction.
5268 // Also avoid hoisting if we didn't see any ands with the exact DemandBits
5269 // mask, since these are the only ands that will be removed by isel.
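// Worked example (hypothetical values): if every use is "and X, 255", then
// DemandBits == 0xff, ActiveBits == 8, isMask holds and WidestAndBits ==
// DemandBits, so we go on to try an i8 zero-extending load below.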
5270 if (ActiveBits <= 1 || !APIntOps::isMask(ActiveBits, DemandBits) ||
5271 WidestAndBits != DemandBits)
5274 LLVMContext &Ctx = Load->getType()->getContext();
5275 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
5276 EVT TruncVT = TLI->getValueType(*DL, TruncTy);
5278 // Reject cases that won't be matched as extloads.
5279 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
5280 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
5283 IRBuilder<> Builder(Load->getNextNode());
5284 auto *NewAnd = dyn_cast<Instruction>(
5285 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
5287 // Replace all uses of load with new and (except for the use of load in the new and itself).
5289 Load->replaceAllUsesWith(NewAnd);
5290 NewAnd->setOperand(0, Load);
5292 // Remove any and instructions that are now redundant.
5293 for (auto *And : AndsToMaybeRemove)
5294 // Check that the and mask is the same as the one we decided to put on the new and.
5296 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
5297 And->replaceAllUsesWith(NewAnd);
5298 if (&*CurInstIterator == And)
5299 CurInstIterator = std::next(And->getIterator());
5300 And->eraseFromParent();
5308 /// Check if V (an operand of a select instruction) is an expensive instruction
5309 /// that is only used once.
5310 static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
5311 auto *I = dyn_cast<Instruction>(V);
5312 // If it's safe to speculatively execute, then it should not have side
5313 // effects; therefore, it's safe to sink and possibly *not* execute.
5314 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
5315 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive;
5318 /// Returns true if a SelectInst should be turned into an explicit branch.
5319 static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
5321 // FIXME: This should use the same heuristics as IfConversion to determine
5322 // whether a select is better represented as a branch. This requires that
5323 // branch probability metadata is preserved for the select, which is not the case currently.
5326 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
5328 // If a branch is predictable, an out-of-order CPU can avoid blocking on its
5329 // comparison condition. If the compare has more than one use, there's
5330 // probably another cmov or setcc around, so it's not worth emitting a branch.
5331 if (!Cmp || !Cmp->hasOneUse())
5334 Value *CmpOp0 = Cmp->getOperand(0);
5335 Value *CmpOp1 = Cmp->getOperand(1);
5337 // Emit "cmov on compare with a memory operand" as a branch to avoid stalls
5338 // on a load from memory. But if the load is used more than once, do not
5339 // change the select to a branch because the load is probably needed
5340 // regardless of whether the branch is taken or not.
5341 if ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
5342 (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()))
5345 // If either operand of the select is expensive and only needed on one side
5346 // of the select, we should form a branch.
5347 if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
5348 sinkSelectOperand(TTI, SI->getFalseValue()))
5355 /// If we have a SelectInst that will likely profit from branch prediction,
5356 /// turn it into a branch.
5357 bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
5358 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
5360 // Can we convert the 'select' to CF ?
5361 if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
5364 TargetLowering::SelectSupportKind SelectKind;
5366 SelectKind = TargetLowering::VectorMaskSelect;
5367 else if (SI->getType()->isVectorTy())
5368 SelectKind = TargetLowering::ScalarCondVectorVal;
5370 SelectKind = TargetLowering::ScalarValSelect;
5372 // Do we have efficient codegen support for this kind of 'select'?
5373 if (TLI->isSelectSupported(SelectKind)) {
5374 // We have efficient codegen support for the select instruction.
5375 // Check if it is profitable to keep this 'select'.
5376 if (!TLI->isPredictableSelectExpensive() ||
5377 !isFormingBranchFromSelectProfitable(TTI, SI))
5383 // Transform a sequence like this:
5385 // %cmp = cmp uge i32 %a, %b
5386 // %sel = select i1 %cmp, i32 %c, i32 %d
5390 // %cmp = cmp uge i32 %a, %b
5391 // br i1 %cmp, label %select.true, label %select.false
5393 // br label %select.end
5395 // br label %select.end
5397 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
5399 // In addition, we may sink instructions that produce %c or %d from
5400 // the entry block into the destination(s) of the new branch.
5401 // If the true or false blocks do not contain a sunken instruction, that
5402 // block and its branch may be optimized away. In that case, one side of the
5403 // first branch will point directly to select.end, and the corresponding PHI
5404 // predecessor block will be the start block.
5406 // First, we split the block containing the select into 2 blocks.
5407 BasicBlock *StartBlock = SI->getParent();
5408 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
5409 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
5411 // Delete the unconditional branch that was just created by the split.
5412 StartBlock->getTerminator()->eraseFromParent();
5414 // These are the new basic blocks for the conditional branch.
5415 // At least one will become an actual new basic block.
5416 BasicBlock *TrueBlock = nullptr;
5417 BasicBlock *FalseBlock = nullptr;
5419 // Sink expensive instructions into the conditional blocks to avoid executing
5420 // them speculatively.
5421 if (sinkSelectOperand(TTI, SI->getTrueValue())) {
5422 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink",
5423 EndBlock->getParent(), EndBlock);
5424 auto *TrueBranch = BranchInst::Create(EndBlock, TrueBlock);
5425 auto *TrueInst = cast<Instruction>(SI->getTrueValue());
5426 TrueInst->moveBefore(TrueBranch);
5428 if (sinkSelectOperand(TTI, SI->getFalseValue())) {
5429 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink",
5430 EndBlock->getParent(), EndBlock);
5431 auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
5432 auto *FalseInst = cast<Instruction>(SI->getFalseValue());
5433 FalseInst->moveBefore(FalseBranch);
5436 // If there was nothing to sink, then arbitrarily choose the 'false' side
5437 // for a new input value to the PHI.
5438 if (TrueBlock == FalseBlock) {
5439 assert(TrueBlock == nullptr &&
5440 "Unexpected basic block transform while optimizing select");
5442 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
5443 EndBlock->getParent(), EndBlock);
5444 BranchInst::Create(EndBlock, FalseBlock);
5447 // Insert the real conditional branch based on the original condition.
5448 // If we did not create a new block for one of the 'true' or 'false' paths
5449 // of the condition, it means that side of the branch goes to the end block
5450 // directly and the path originates from the start block from the point of
5451 // view of the new PHI.
5452 if (TrueBlock == nullptr) {
5453 BranchInst::Create(EndBlock, FalseBlock, SI->getCondition(), SI);
5454 TrueBlock = StartBlock;
5455 } else if (FalseBlock == nullptr) {
5456 BranchInst::Create(TrueBlock, EndBlock, SI->getCondition(), SI);
5457 FalseBlock = StartBlock;
5459 BranchInst::Create(TrueBlock, FalseBlock, SI->getCondition(), SI);
5462 // The select itself is replaced with a PHI Node.
5463 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
5465 PN->addIncoming(SI->getTrueValue(), TrueBlock);
5466 PN->addIncoming(SI->getFalseValue(), FalseBlock);
5468 SI->replaceAllUsesWith(PN);
5469 SI->eraseFromParent();
5471 // Instruct OptimizeBlock to skip to the next block.
5472 CurInstIterator = StartBlock->end();
5473 ++NumSelectsExpanded;
5477 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
5478 SmallVector<int, 16> Mask(SVI->getShuffleMask());
5480 for (unsigned i = 0; i < Mask.size(); ++i) {
5481 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
5483 SplatElem = Mask[i];
5489 /// Some targets have expensive vector shifts if the lanes aren't all the same
5490 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
5491 /// it's often worth sinking a shufflevector splat down to its use so that
5492 /// codegen can spot all lanes are identical.
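/// Illustrative example (hypothetical IR): a splat such as
///   %sp = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
/// that is used by a shift in another block gets a copy of the shuffle
/// recreated in that block, so the backend can see there that all lanes of
/// the shift amount are identical.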
5493 bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
5494 BasicBlock *DefBB = SVI->getParent();
5496 // Only do this xform if variable vector shifts are particularly expensive.
5497 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
5500 // We only expect better codegen by sinking a shuffle if we can recognise a constant splat.
5502 if (!isBroadcastShuffle(SVI))
5505 // InsertedShuffles - Only insert a shuffle in each block once.
5506 DenseMap<BasicBlock*, Instruction*> InsertedShuffles;
5508 bool MadeChange = false;
5509 for (User *U : SVI->users()) {
5510 Instruction *UI = cast<Instruction>(U);
5512 // Figure out which BB this shuffle is used in.
5513 BasicBlock *UserBB = UI->getParent();
5514 if (UserBB == DefBB) continue;
5516 // For now only apply this when the splat is used by a shift instruction.
5517 if (!UI->isShift()) continue;
5519 // Everything checks out, sink the shuffle if the user's block doesn't
5520 // already have a copy.
5521 Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
5523 if (!InsertedShuffle) {
5524 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
5525 assert(InsertPt != UserBB->end());
5527 new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
5528 SVI->getOperand(2), "", &*InsertPt);
5531 UI->replaceUsesOfWith(SVI, InsertedShuffle);
5535 // If we removed all uses, nuke the shuffle.
5536 if (SVI->use_empty()) {
5537 SVI->eraseFromParent();
5544 bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
5548 Value *Cond = SI->getCondition();
5549 Type *OldType = Cond->getType();
5550 LLVMContext &Context = Cond->getContext();
5551 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
5552 unsigned RegWidth = RegType.getSizeInBits();
5554 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
5557 // If the register width is greater than the type width, expand the condition
5558 // of the switch instruction and each case constant to the width of the
5559 // register. By widening the type of the switch condition, subsequent
5560 // comparisons (for case comparisons) will not need to be extended to the
5561 // preferred register width, so we will potentially eliminate N-1 extends,
5562 // where N is the number of cases in the switch.
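// For example (hypothetical target): an i8 switch condition on a target
// whose condition register type is i32 is widened to i32 once here, so each
// case comparison no longer needs its own extend.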
5563 auto *NewType = Type::getIntNTy(Context, RegWidth);
5565 // Zero-extend the switch condition and case constants unless the switch
5566 // condition is a function argument that is already being sign-extended.
5567 // In that case, we can avoid an unnecessary mask/extension by sign-extending
5568 // everything instead.
5569 Instruction::CastOps ExtType = Instruction::ZExt;
5570 if (auto *Arg = dyn_cast<Argument>(Cond))
5571 if (Arg->hasSExtAttr())
5572 ExtType = Instruction::SExt;
5574 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
5575 ExtInst->insertBefore(SI);
5576 SI->setCondition(ExtInst);
5577 for (SwitchInst::CaseIt Case : SI->cases()) {
5578 APInt NarrowConst = Case.getCaseValue()->getValue();
5579 APInt WideConst = (ExtType == Instruction::ZExt) ?
5580 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
5581 Case.setValue(ConstantInt::get(Context, WideConst));
5588 /// \brief Helper class to promote a scalar operation to a vector one.
5589 /// This class is used to move an extractelement transition downward.
5591 /// a = vector_op <2 x i32>
5592 /// b = extractelement <2 x i32> a, i32 0
5597 /// a = vector_op <2 x i32>
5598 /// c = vector_op a (equivalent to scalar_op on the related lane)
5599 /// * d = extractelement <2 x i32> c, i32 0
5601 /// Assuming both extractelement and store can be combined, we get rid of the transition.
5603 class VectorPromoteHelper {
5604 /// DataLayout associated with the current module.
5605 const DataLayout &DL;
5607 /// Used to perform some checks on the legality of vector operations.
5608 const TargetLowering &TLI;
5610 /// Used to estimate the cost of the promoted chain.
5611 const TargetTransformInfo &TTI;
5613 /// The transition being moved downwards.
5614 Instruction *Transition;
5615 /// The sequence of instructions to be promoted.
5616 SmallVector<Instruction *, 4> InstsToBePromoted;
5617 /// Cost of combining a store and an extract.
5618 unsigned StoreExtractCombineCost;
5619 /// Instruction that will be combined with the transition.
5620 Instruction *CombineInst;
5622 /// \brief The instruction that represents the current end of the transition.
5623 /// Since we are faking the promotion until we reach the end of the chain
5624 /// of computation, we need a way to get the current end of the transition.
5625 Instruction *getEndOfTransition() const {
5626 if (InstsToBePromoted.empty())
5628 return InstsToBePromoted.back();
5631 /// \brief Return the index of the original value in the transition.
5632 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
5633 /// c, is at index 0.
5634 unsigned getTransitionOriginalValueIdx() const {
5635 assert(isa<ExtractElementInst>(Transition) &&
5636 "Other kind of transitions are not supported yet");
5640 /// \brief Return the index of the index in the transition.
5641 /// E.g., for "extractelement <2 x i32> c, i32 0" the index is at index 1.
5643 unsigned getTransitionIdx() const {
5644 assert(isa<ExtractElementInst>(Transition) &&
5645 "Other kind of transitions are not supported yet");
5649 /// \brief Get the type of the transition.
5650 /// This is the type of the original value.
5651 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
5652 /// transition is <2 x i32>.
5653 Type *getTransitionType() const {
5654 return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
5657 /// \brief Promote \p ToBePromoted by moving \p Def downward through it.
5658 /// I.e., we have the following sequence:
5659 /// Def = Transition <ty1> a to <ty2>
5660 /// b = ToBePromoted <ty2> Def, ...
5662 /// b = ToBePromoted <ty1> a, ...
5663 /// Def = Transition <ty1> ToBePromoted to <ty2>
5664 void promoteImpl(Instruction *ToBePromoted);
5666 /// \brief Check whether or not it is profitable to promote all the
5667 /// instructions enqueued to be promoted.
5668 bool isProfitableToPromote() {
5669 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
5670 unsigned Index = isa<ConstantInt>(ValIdx)
5671 ? cast<ConstantInt>(ValIdx)->getZExtValue()
5673 Type *PromotedType = getTransitionType();
5675 StoreInst *ST = cast<StoreInst>(CombineInst);
5676 unsigned AS = ST->getPointerAddressSpace();
5677 unsigned Align = ST->getAlignment();
5678 // Check if this store is supported.
5679 if (!TLI.allowsMisalignedMemoryAccesses(
5680 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
5682 // If this is not supported, there is no way we can combine
5683 // the extract with the store.
5687 // The scalar chain of computation has to pay for the transition
5688 // scalar to vector.
5689 // The vector chain has to account for the combining cost.
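// Illustrative accounting (hypothetical costs): with a single promoted add,
// ScalarCost = cost(extractelement) + cost(scalar add), while
// VectorCost = StoreExtractCombineCost + cost(vector add); the promotion is
// only reported profitable below when ScalarCost > VectorCost.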
5690 uint64_t ScalarCost =
5691 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
5692 uint64_t VectorCost = StoreExtractCombineCost;
5693 for (const auto &Inst : InstsToBePromoted) {
5694 // Compute the cost.
5695 // By construction, all instructions being promoted are arithmetic ones.
5696 // Moreover, one argument is a constant that can be viewed as a splat
5698 Value *Arg0 = Inst->getOperand(0);
5699 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
5700 isa<ConstantFP>(Arg0);
5701 TargetTransformInfo::OperandValueKind Arg0OVK =
5702 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
5703 : TargetTransformInfo::OK_AnyValue;
5704 TargetTransformInfo::OperandValueKind Arg1OVK =
5705 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
5706 : TargetTransformInfo::OK_AnyValue;
5707 ScalarCost += TTI.getArithmeticInstrCost(
5708 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
5709 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
5712 DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
5713 << ScalarCost << "\nVector: " << VectorCost << '\n');
5714 return ScalarCost > VectorCost;
5717 /// \brief Generate a constant vector with \p Val with the same
5718 /// number of elements as the transition.
5719 /// \p UseSplat defines whether or not \p Val should be replicated
5720 /// across the whole vector.
5721 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
5722 /// otherwise we generate a vector with as many undef as possible:
5723 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
5724 /// used at the index of the extract.
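/// For example (hypothetical values): for a <4 x i32> transition extracting
/// index 2 and Val == 7, UseSplat == true yields <7, 7, 7, 7>, otherwise the
/// result is <undef, undef, 7, undef>.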
5725 Value *getConstantVector(Constant *Val, bool UseSplat) const {
5726 unsigned ExtractIdx = UINT_MAX;
5728 // If we cannot determine where the constant must be, we have to
5729 // use a splat constant.
5730 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
5731 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
5732 ExtractIdx = CstVal->getSExtValue();
5737 unsigned End = getTransitionType()->getVectorNumElements();
5739 return ConstantVector::getSplat(End, Val);
5741 SmallVector<Constant *, 4> ConstVec;
5742 UndefValue *UndefVal = UndefValue::get(Val->getType());
5743 for (unsigned Idx = 0; Idx != End; ++Idx) {
5744 if (Idx == ExtractIdx)
5745 ConstVec.push_back(Val);
5747 ConstVec.push_back(UndefVal);
5749 return ConstantVector::get(ConstVec);
5752 /// \brief Check if promoting to a vector type an operand at \p OperandIdx
5753 /// in \p Use can trigger undefined behavior.
5754 static bool canCauseUndefinedBehavior(const Instruction *Use,
5755 unsigned OperandIdx) {
5756 // It is not safe to introduce undef when the operand is on
5757 // the right hand side of a division-like instruction.
5758 if (OperandIdx != 1)
5760 switch (Use->getOpcode()) {
5763 case Instruction::SDiv:
5764 case Instruction::UDiv:
5765 case Instruction::SRem:
5766 case Instruction::URem:
5768 case Instruction::FDiv:
5769 case Instruction::FRem:
5770 return !Use->hasNoNaNs();
5772 llvm_unreachable(nullptr);
5776 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
5777 const TargetTransformInfo &TTI, Instruction *Transition,
5778 unsigned CombineCost)
5779 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
5780 StoreExtractCombineCost(CombineCost), CombineInst(nullptr) {
5781 assert(Transition && "Do not know how to promote null");
5784 /// \brief Check if we can promote \p ToBePromoted to \p Type.
5785 bool canPromote(const Instruction *ToBePromoted) const {
5786 // We could support CastInst too.
5787 return isa<BinaryOperator>(ToBePromoted);
5790 /// \brief Check if it is profitable to promote \p ToBePromoted
5791 /// by moving the transition downward through it.
5792 bool shouldPromote(const Instruction *ToBePromoted) const {
5793 // Promote only if all the operands can be statically expanded.
5794 // Indeed, we do not want to introduce any new kind of transitions.
5795 for (const Use &U : ToBePromoted->operands()) {
5796 const Value *Val = U.get();
5797 if (Val == getEndOfTransition()) {
5798 // If the use is a division and the transition is on the rhs,
5799 // we cannot promote the operation, otherwise we may create a
5800 // division by zero.
5801 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
5805 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
5806 !isa<ConstantFP>(Val))
5809 // Check that the resulting operation is legal.
5810 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
5813 return StressStoreExtract ||
5814 TLI.isOperationLegalOrCustom(
5815 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
5818 /// \brief Check whether or not \p Use can be combined
5819 /// with the transition.
5820 /// I.e., is it possible to do Use(Transition) => AnotherUse?
5821 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
5823 /// \brief Record \p ToBePromoted as part of the chain to be promoted.
5824 void enqueueForPromotion(Instruction *ToBePromoted) {
5825 InstsToBePromoted.push_back(ToBePromoted);
5828 /// \brief Set the instruction that will be combined with the transition.
5829 void recordCombineInstruction(Instruction *ToBeCombined) {
5830 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
5831 CombineInst = ToBeCombined;
5834 /// \brief Promote all the instructions enqueued for promotion if it is
5836 /// \return True if the promotion happened, false otherwise.
5838 // Check if there is something to promote.
5839 // Right now, if we do not have anything to combine with,
5840 // we assume the promotion is not profitable.
5841 if (InstsToBePromoted.empty() || !CombineInst)
5845 if (!StressStoreExtract && !isProfitableToPromote())
5849 for (auto &ToBePromoted : InstsToBePromoted)
5850 promoteImpl(ToBePromoted);
5851 InstsToBePromoted.clear();
5855 } // End of anonymous namespace.
5857 void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
5858 // At this point, we know that all the operands of ToBePromoted but Def
5859 // can be statically promoted.
5860 // For Def, we need to use its parameter in ToBePromoted:
5861 // b = ToBePromoted ty1 a
5862 // Def = Transition ty1 b to ty2
5863 // Move the transition down.
5864 // 1. Replace all uses of the promoted operation by the transition.
5865 // = ... b => = ... Def.
5866 assert(ToBePromoted->getType() == Transition->getType() &&
5867 "The type of the result of the transition does not match "
5869 ToBePromoted->replaceAllUsesWith(Transition);
5870 // 2. Update the type of the uses.
5871 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
5872 Type *TransitionTy = getTransitionType();
5873 ToBePromoted->mutateType(TransitionTy);
5874 // 3. Update all the operands of the promoted operation with promoted
5876 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
5877 for (Use &U : ToBePromoted->operands()) {
5878 Value *Val = U.get();
5879 Value *NewVal = nullptr;
5880 if (Val == Transition)
5881 NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
5882 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
5883 isa<ConstantFP>(Val)) {
5884 // Use a splat constant if it is not safe to use undef.
5885 NewVal = getConstantVector(
5886 cast<Constant>(Val),
5887 isa<UndefValue>(Val) ||
5888 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
5890 llvm_unreachable("Did you modified shouldPromote and forgot to update "
5892 ToBePromoted->setOperand(U.getOperandNo(), NewVal);
5894 Transition->removeFromParent();
5895 Transition->insertAfter(ToBePromoted);
5896 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
5899 /// Some targets can do store(extractelement) with one instruction.
5900 /// Try to push the extractelement towards the stores when the target
5901 /// has this feature and this is profitable.
5902 bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
5903 unsigned CombineCost = UINT_MAX;
5904 if (DisableStoreExtract || !TLI ||
5905 (!StressStoreExtract &&
5906 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
5907 Inst->getOperand(1), CombineCost)))
5910 // At this point we know that Inst is a vector to scalar transition.
5911 // Try to move it down the def-use chain, until:
5912 // - We can combine the transition with its single use
5913 // => we got rid of the transition.
5914 // - We escape the current basic block
5915 // => we would need to check that we are moving it at a cheaper place and
5916 // we do not do that for now.
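// Illustrative example (hypothetical IR):
//   %e = extractelement <2 x i32> %v, i32 0
//   %a = add i32 %e, 1
//   store i32 %a, i32* %p
// may be rewritten as
//   %va = add <2 x i32> %v, <i32 1, i32 undef>
//   %e  = extractelement <2 x i32> %va, i32 0
//   store i32 %e, i32* %p
// which suitable targets can then select as a single store(extractelement).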
5917 BasicBlock *Parent = Inst->getParent();
5918 DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
5919 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
5920 // If the transition has more than one use, assume this is not going to be beneficial.
5922 while (Inst->hasOneUse()) {
5923 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
5924 DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
5926 if (ToBePromoted->getParent() != Parent) {
5927 DEBUG(dbgs() << "Instruction to promote is in a different block ("
5928 << ToBePromoted->getParent()->getName()
5929 << ") than the transition (" << Parent->getName() << ").\n");
5933 if (VPH.canCombine(ToBePromoted)) {
5934 DEBUG(dbgs() << "Assume " << *Inst << '\n'
5935 << "will be combined with: " << *ToBePromoted << '\n');
5936 VPH.recordCombineInstruction(ToBePromoted);
5937 bool Changed = VPH.promote();
5938 NumStoreExtractExposed += Changed;
5942 DEBUG(dbgs() << "Try promoting.\n");
5943 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
5946 DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
5948 VPH.enqueueForPromotion(ToBePromoted);
5949 Inst = ToBePromoted;
5954 bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
5955 // Bail out if we inserted the instruction to prevent optimizations from
5956 // stepping on each other's toes.
5957 if (InsertedInsts.count(I))
5960 if (PHINode *P = dyn_cast<PHINode>(I)) {
5961 // It is possible for very late stage optimizations (such as SimplifyCFG)
5962 // to introduce PHI nodes too late to be cleaned up. If we detect such a
5963 // trivial PHI, go ahead and zap it here.
5964 if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) {
5965 P->replaceAllUsesWith(V);
5966 P->eraseFromParent();
5973 if (CastInst *CI = dyn_cast<CastInst>(I)) {
5974 // If the source of the cast is a constant, then this should have
5975 // already been constant folded. The only reason NOT to constant fold
5976 // it is if something (e.g. LSR) was careful to place the constant
5977 // evaluation in a block other than the one that uses it (e.g. to hoist
5978 // the address of globals out of a loop). If this is the case, we don't
5979 // want to forward-subst the cast.
5980 if (isa<Constant>(CI->getOperand(0)))
5983 if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
5986 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
5987 /// Sink a zext or sext into its user blocks if the target type doesn't
5988 /// fit in one register
5990 TLI->getTypeAction(CI->getContext(),
5991 TLI->getValueType(*DL, CI->getType())) ==
5992 TargetLowering::TypeExpandInteger) {
5993 return SinkCast(CI);
5995 bool MadeChange = moveExtToFormExtLoad(I);
5996 return MadeChange | optimizeExtUses(I);
6002 if (CmpInst *CI = dyn_cast<CmpInst>(I))
6003 if (!TLI || !TLI->hasMultipleConditionRegisters())
6004 return OptimizeCmpExpression(CI);
6006 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
6007 stripInvariantGroupMetadata(*LI);
6009 bool Modified = optimizeLoadExt(LI);
6010 unsigned AS = LI->getPointerAddressSpace();
6011 Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
6017 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
6018 stripInvariantGroupMetadata(*SI);
6020 unsigned AS = SI->getPointerAddressSpace();
6021 return optimizeMemoryInst(I, SI->getOperand(1),
6022 SI->getOperand(0)->getType(), AS);
6027 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
6029 if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
6030 BinOp->getOpcode() == Instruction::LShr)) {
6031 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
6032 if (TLI && CI && TLI->hasExtractBitsInsn())
6033 return OptimizeExtractBits(BinOp, CI, *TLI, *DL);
6038 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
6039 if (GEPI->hasAllZeroIndices()) {
6040 /// The GEP operand must be a pointer, so must its result -> BitCast
6041 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
6042 GEPI->getName(), GEPI);
6043 GEPI->replaceAllUsesWith(NC);
6044 GEPI->eraseFromParent();
6046 optimizeInst(NC, ModifiedDT);
6052 if (CallInst *CI = dyn_cast<CallInst>(I))
6053 return optimizeCallInst(CI, ModifiedDT);
6055 if (SelectInst *SI = dyn_cast<SelectInst>(I))
6056 return optimizeSelectInst(SI);
6058 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
6059 return optimizeShuffleVectorInst(SVI);
6061 if (auto *Switch = dyn_cast<SwitchInst>(I))
6062 return optimizeSwitchInst(Switch);
6064 if (isa<ExtractElementInst>(I))
6065 return optimizeExtractElementInst(I);
6070 /// Given an OR instruction, check to see if this is a bitreverse
6071 /// idiom. If so, insert the new intrinsic and return true.
6072 static bool makeBitReverse(Instruction &I, const DataLayout &DL,
6073 const TargetLowering &TLI) {
6074 if (!I.getType()->isIntegerTy() ||
6075 !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
6076 TLI.getValueType(DL, I.getType(), true)))
6079 SmallVector<Instruction*, 4> Insts;
6080 if (!recognizeBitReverseOrBSwapIdiom(&I, false, true, Insts))
6082 Instruction *LastInst = Insts.back();
6083 I.replaceAllUsesWith(LastInst);
6084 RecursivelyDeleteTriviallyDeadInstructions(&I);
6088 // In this pass we look for GEP and cast instructions that are used
6089 // across basic blocks and rewrite them to improve basic-block-at-a-time selection.
6091 bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool& ModifiedDT) {
6093 bool MadeChange = false;
6095 CurInstIterator = BB.begin();
6096 while (CurInstIterator != BB.end()) {
6097 MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
6102 bool MadeBitReverse = true;
6103 while (TLI && MadeBitReverse) {
6104 MadeBitReverse = false;
6105 for (auto &I : reverse(BB)) {
6106 if (makeBitReverse(I, *DL, *TLI)) {
6107 MadeBitReverse = MadeChange = true;
6112 MadeChange |= dupRetToEnableTailCallOpts(&BB);
6117 // If an llvm.dbg.value is far away from the value it describes, ISel may
6118 // not be able to handle it properly. ISel will drop the llvm.dbg.value if
6119 // it cannot find a node corresponding to the value.
6120 bool CodeGenPrepare::placeDbgValues(Function &F) {
6121 bool MadeChange = false;
6122 for (BasicBlock &BB : F) {
6123 Instruction *PrevNonDbgInst = nullptr;
6124 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
6125 Instruction *Insn = &*BI++;
6126 DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
6127 // Leave dbg.values that refer to an alloca alone. These
6128 // intrinsics describe the address of a variable (= the alloca)
6129 // being taken. They should not be moved next to the alloca
6130 // (and to the beginning of the scope), but rather stay close to
6131 // where said address is used.
6132 if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
6133 PrevNonDbgInst = Insn;
6137 Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
6138 if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
6139 // If VI is a phi in a block with an EHPad terminator, we can't insert after it.
6141 if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
6143 DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
6144 DVI->removeFromParent();
6145 if (isa<PHINode>(VI))
6146 DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
6148 DVI->insertAfter(VI);
6157 // If there is a sequence that branches based on comparing a single bit
6158 // against zero that can be combined into a single instruction, and the
6159 // target supports folding these into a single instruction, sink the
6160 // mask and compare into the branch uses. Do this before OptimizeBlock ->
6161 // OptimizeInst -> OptimizeCmpExpression, which perturbs the pattern being searched for.
6163 bool CodeGenPrepare::sinkAndCmp(Function &F) {
6164 if (!EnableAndCmpSinking)
6166 if (!TLI || !TLI->isMaskAndBranchFoldingLegal())
6168 bool MadeChange = false;
6169 for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
6170 BasicBlock *BB = &*I++;
6172 // Does this BB end with the following?
6173 // %andVal = and %val, #single-bit-set
6174 // %icmpVal = icmp %andVal, 0
6175 // br i1 %icmpVal, label %dest1, label %dest2
6176 BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator());
6177 if (!Brcc || !Brcc->isConditional())
6179 ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0));
6180 if (!Cmp || Cmp->getParent() != BB)
6182 ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
6183 if (!Zero || !Zero->isZero())
6185 Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0));
6186 if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB)
6188 ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1));
6189 if (!Mask || !Mask->getUniqueInteger().isPowerOf2())
6191 DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump());
6193 // Push the "and; icmp" for any users that are conditional branches.
6194 // Since there can only be one branch use per BB, we don't need to keep
6195 // track of which BBs we insert into.
6196 for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end();
6200 BranchInst *BrccUser = dyn_cast<BranchInst>(*UI);
6202 if (!BrccUser || !BrccUser->isConditional())
6204 BasicBlock *UserBB = BrccUser->getParent();
6205 if (UserBB == BB) continue;
6206 DEBUG(dbgs() << "found Brcc use\n");
6208 // Sink the "and; icmp" to use.
6210 BinaryOperator *NewAnd =
6211 BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "",
6214 CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero,
6218 DEBUG(BrccUser->getParent()->dump());
6224 /// \brief Retrieve the probabilities of a conditional branch. Returns true on
6225 /// success, or returns false if no or invalid metadata was found.
6226 static bool extractBranchMetadata(BranchInst *BI,
6227 uint64_t &ProbTrue, uint64_t &ProbFalse) {
6228 assert(BI->isConditional() &&
6229 "Looking for probabilities on unconditional branch?");
6230 auto *ProfileData = BI->getMetadata(LLVMContext::MD_prof);
6231 if (!ProfileData || ProfileData->getNumOperands() != 3)
6234 const auto *CITrue =
6235 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1));
6236 const auto *CIFalse =
6237 mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2));
6238 if (!CITrue || !CIFalse)
6241 ProbTrue = CITrue->getValue().getZExtValue();
6242 ProbFalse = CIFalse->getValue().getZExtValue();
6247 /// \brief Scale down both weights to fit into uint32_t.
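/// For illustration (hypothetical weights): NewTrue = 6e9 and NewFalse = 2e9
/// give Scale = 6e9 / UINT32_MAX + 1 == 2, so the weights become 3e9 and
/// 1e9, preserving their ratio while fitting in uint32_t.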
6248 static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
6249 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
6250 uint32_t Scale = (NewMax / UINT32_MAX) + 1;
6251 NewTrue = NewTrue / Scale;
6252 NewFalse = NewFalse / Scale;
6255 /// \brief Some targets prefer to split a conditional branch like:
6257 /// %0 = icmp ne i32 %a, 0
6258 /// %1 = icmp ne i32 %b, 0
6259 /// %or.cond = or i1 %0, %1
6260 /// br i1 %or.cond, label %TrueBB, label %FalseBB
6262 /// into multiple branch instructions like:
6265 /// %0 = icmp ne i32 %a, 0
6266 /// br i1 %0, label %TrueBB, label %bb2
6268 /// %1 = icmp ne i32 %b, 0
6269 /// br i1 %1, label %TrueBB, label %FalseBB
6271 /// This usually allows instruction selection to do even further optimizations
6272 /// and combine the compare with the branch instruction. Currently this is
6273 /// applied for targets which have "cheap" jump instructions.
6275 /// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
6277 bool CodeGenPrepare::splitBranchCondition(Function &F) {
6278 if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
6281 bool MadeChange = false;
6282 for (auto &BB : F) {
6283 // Does this BB end with the following?
6284 // %cond1 = icmp|fcmp|binary instruction ...
6285 // %cond2 = icmp|fcmp|binary instruction ...
6286 // %cond.or = or|and i1 %cond1, %cond2
6287 // br i1 %cond.or, label %dest1, label %dest2
6288 BinaryOperator *LogicOp;
6289 BasicBlock *TBB, *FBB;
6290 if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
6293 auto *Br1 = cast<BranchInst>(BB.getTerminator());
6294 if (Br1->getMetadata(LLVMContext::MD_unpredictable))
6298 Value *Cond1, *Cond2;
6299 if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
6300 m_OneUse(m_Value(Cond2)))))
6301 Opc = Instruction::And;
6302 else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
6303 m_OneUse(m_Value(Cond2)))))
6304 Opc = Instruction::Or;
6308 if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
6309 !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) )
6312 DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
6315 auto *InsertBefore = std::next(Function::iterator(BB))
6316 .getNodePtrUnchecked();
6317 auto TmpBB = BasicBlock::Create(BB.getContext(),
6318 BB.getName() + ".cond.split",
6319 BB.getParent(), InsertBefore);
6321 // Update the original basic block to use the first condition directly in its
6322 // branch instruction, and remove the no longer needed and/or instruction.
6323 Br1->setCondition(Cond1);
6324 LogicOp->eraseFromParent();
6326 // Depending on the condition we have to either replace the true or the false
6327 // successor of the original branch instruction.
6328 if (Opc == Instruction::And)
6329 Br1->setSuccessor(0, TmpBB);
6331 Br1->setSuccessor(1, TmpBB);
6333 // Fill in the new basic block.
6334 auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
6335 if (auto *I = dyn_cast<Instruction>(Cond2)) {
6336 I->removeFromParent();
6337 I->insertBefore(Br2);
6340 // Update PHI nodes in both successors. The original BB needs to be
6341 // replaced in one successor's PHI nodes, because the branch now comes from
6342 // the newly generated BB (TmpBB). In the other successor we need to add one
6343 // incoming edge to the PHI nodes, because both branch instructions target
6344 // now the same successor. Depending on the original branch condition
6345 // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
6346 // we perform the correct update for the PHI nodes.
6347 // This doesn't change the successor order of the just created branch
6348 // instruction (or any other instruction).
6349 if (Opc == Instruction::Or)
6350 std::swap(TBB, FBB);
6352 // Replace the old BB with the new BB.
6353 for (auto &I : *TBB) {
6354 PHINode *PN = dyn_cast<PHINode>(&I);
6358 while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
6359 PN->setIncomingBlock(i, TmpBB);
6362 // Add another incoming edge from the new BB.
6363 for (auto &I : *FBB) {
6364 PHINode *PN = dyn_cast<PHINode>(&I);
6367 auto *Val = PN->getIncomingValueForBlock(&BB);
6368 PN->addIncoming(Val, TmpBB);
6371 // Update the branch weights (from SelectionDAGBuilder::
6372 // FindMergedConditions).
6373 if (Opc == Instruction::Or) {
6374 // Codegen X | Y as:
6383 // We have flexibility in setting Prob for BB1 and Prob for NewBB.
6384 // The requirement is that
6385 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
6386 // = TrueProb for original BB.
6387 // Assuming the original weights are A and B, one choice is to set BB1's
6388 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
6390 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
6391 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
6392 // TmpBB, but the math is more complicated.
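// Numeric illustration (hypothetical weights A = 1, B = 3): Br1 gets weights
// 1 and 1 + 2*3 = 7, and TmpBB's branch gets 1 and 2*3 = 6, before both
// pairs are scaled to fit in uint32_t.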
6393 uint64_t TrueWeight, FalseWeight;
6394 if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
6395 uint64_t NewTrueWeight = TrueWeight;
6396 uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
6397 scaleWeights(NewTrueWeight, NewFalseWeight);
6398 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
6399 .createBranchWeights(NewTrueWeight, NewFalseWeight));
6401 NewTrueWeight = TrueWeight;
6402 NewFalseWeight = 2 * FalseWeight;
6403 scaleWeights(NewTrueWeight, NewFalseWeight);
6404 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
6405 .createBranchWeights(NewTrueWeight, NewFalseWeight));
6408 // Codegen X & Y as:
6416 // This requires creation of TmpBB after CurBB.
6418 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
6419 // The requirement is that
6420 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
6421 // = FalseProb for original BB.
6422 // Assuming the original weights are A and B, one choice is to set BB1's
6423 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
6425 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
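// Numeric illustration (hypothetical weights A = 1, B = 3): Br1 gets weights
// 2*1 + 3 = 5 and 3, and TmpBB's branch gets 2*1 = 2 and 3, before scaling.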
6426 uint64_t TrueWeight, FalseWeight;
6427 if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) {
6428 uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
6429 uint64_t NewFalseWeight = FalseWeight;
6430 scaleWeights(NewTrueWeight, NewFalseWeight);
6431 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
6432 .createBranchWeights(NewTrueWeight, NewFalseWeight));
6434 NewTrueWeight = 2 * TrueWeight;
6435 NewFalseWeight = FalseWeight;
6436 scaleWeights(NewTrueWeight, NewFalseWeight);
6437 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
6438 .createBranchWeights(NewTrueWeight, NewFalseWeight));
6442 // Note: No point in getting fancy here, since the DT info is never
6443 // available to CodeGenPrepare.
6448 DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
6454 void CodeGenPrepare::stripInvariantGroupMetadata(Instruction &I) {
6455 if (auto *InvariantMD = I.getMetadata(LLVMContext::MD_invariant_group))
6456 I.dropUnknownNonDebugMetadata(InvariantMD->getMetadataID());