//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
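///
/// For example (an illustrative constant expression; @g stands for some
/// hypothetical constant global), this returns true for:
///   getelementptr inbounds ([4 x i32]* @g, i32 0, i32 1)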
static bool pointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::AddrSpaceCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return pointsToConstantGlobal(CE->getOperand(0));
  }
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
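///
/// For example (an illustrative sketch; all names are hypothetical), the
/// pattern being matched looks like:
///   %a = alloca [4 x i32]
///   %p = bitcast [4 x i32]* %a to i8*
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %p,
///       i8* bitcast ([4 x i32]* @cst to i8*), i64 16, i32 4, i1 false)
///   ; ... only reads of %a follow ...
/// Here every use of %a can be rewritten to use @cst directly.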
static bool
isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  // We track lifetime intrinsics as we encounter them. If we decide to go
  // ahead and replace the value with the global, this lets the caller quickly
  // eliminate the markers.

  SmallVector<std::pair<Value *, bool>, 35> ValuesToInspect;
  ValuesToInspect.push_back(std::make_pair(V, false));
  while (!ValuesToInspect.empty()) {
    auto ValuePair = ValuesToInspect.pop_back_val();
    const bool IsOffset = ValuePair.second;
    for (auto &U : ValuePair.first->uses()) {
      Instruction *I = cast<Instruction>(U.getUser());

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        // Ignore non-volatile loads; they are always ok.
        if (!LI->isSimple()) return false;
        continue;
      }

      if (isa<BitCastInst>(I) || isa<AddrSpaceCastInst>(I)) {
        // If uses of the bitcast are ok, we are ok.
        ValuesToInspect.push_back(std::make_pair(I, IsOffset));
        continue;
      }

      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
        // If the GEP has all zero indices, it doesn't offset the pointer. If it
        // doesn't, it does.
        ValuesToInspect.push_back(
            std::make_pair(I, IsOffset || !GEP->hasAllZeroIndices()));
        continue;
      }

      if (auto CS = CallSite(I)) {
        // If this is the function being called then we treat it like a load and
        // ignore it.
        if (CS.isCallee(&U))
          continue;

        // Inalloca arguments are clobbered by the call.
        unsigned ArgNo = CS.getArgumentNo(&U);
        if (CS.isInAllocaArgument(ArgNo))
          return false;

        // If this is a readonly/readnone call site, then we know it is just a
        // load (but one that potentially returns the value itself), so we can
        // ignore it if we know that the value isn't captured.
        if (CS.onlyReadsMemory() &&
            (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
          continue;

        // If this is being passed as a byval argument, the caller is making a
        // copy, so it is only a read of the alloca.
        if (CS.isByValArgument(ArgNo))
          continue;
      }

      // Lifetime intrinsics can be handled by the caller.
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
        if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
            II->getIntrinsicID() == Intrinsic::lifetime_end) {
          assert(II->use_empty() && "Lifetime markers have no result to use!");
          ToDelete.push_back(II);
          continue;
        }
      }

      // If this isn't our memcpy/memmove, reject it as something we can't
      // handle.
      MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
      if (!MI)
        return false;

      // If the transfer is using the alloca as a source of the transfer, then
      // ignore it since it is a load (unless the transfer is volatile).
      if (U.getOperandNo() == 1) {
        if (MI->isVolatile()) return false;
        continue;
      }

      // If we already have seen a copy, reject the second one.
      if (TheCopy) return false;

      // If the pointer has been offset from the start of the alloca, we can't
      // safely handle this.
      if (IsOffset) return false;

      // If the memintrinsic isn't using the alloca as the dest, reject it.
      if (U.getOperandNo() != 0) return false;

      // If the source of the memcpy/move is not a constant global, reject it.
      if (!pointsToConstantGlobal(MI->getSource()))
        return false;

      // Otherwise, the transform is safe. Remember the copy instruction.
      TheCopy = MI;
    }
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - If the specified alloca is only modified
/// by a copy from a constant global, return that copy. If we can prove this,
/// we can replace any uses of the alloca with uses of the global directly.
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {
  MemTransferInst *TheCopy = nullptr;
  if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
    return TheCopy;
  return nullptr;
}

static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
  // Check for array size of 1 (scalar allocation).
  if (!AI.isArrayAllocation()) {
    // i32 1 is the canonical array size for scalar allocations.
    if (AI.getArraySize()->getType()->isIntegerTy(32))
      return nullptr;

    // Canonicalize it.
    Value *V = IC.Builder->getInt32(1);
    AI.setOperand(0, V);
    return &AI;
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
    Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
    AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
    New->setAlignment(AI.getAlignment());

    // Scan to the end of the allocation instructions, to skip over a block of
    // allocas if possible...also skip interleaved debug info
    BasicBlock::iterator It = New;
    while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
      ++It;

    // Now that It is pointing to the first non-allocation-inst in the block,
    // insert our getelementptr instruction...
    Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
    Value *NullIdx = Constant::getNullValue(IdxTy);
    Value *Idx[2] = {NullIdx, NullIdx};
    Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
    IC.InsertNewInstBefore(GEP, *It);

    // Now make everything use the getelementptr instead of the original
    // allocation.
    return IC.ReplaceInstUsesWith(AI, GEP);
  }

  if (isa<UndefValue>(AI.getArraySize()))
    return IC.ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
  if (AI.getArraySize()->getType() != IntPtrTy) {
    Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
    AI.setOperand(0, V);
    return &AI;
  }

  return nullptr;
}

Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  if (auto *I = simplifyAllocaArraySize(*this, AI))
    return I;

  if (AI.getAllocatedType()->isSized()) {
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));

    // Move all alloca's of zero byte objects to the entry block and merge them
    // together. Note that we only do this for alloca's, because malloc should
    // allocate and return a unique pointer, even for a zero byte allocation.
    if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
      // For a zero sized alloca there is no point in doing an array allocation.
      // This is helpful if the array size is a complicated expression not used
      // elsewhere.
      if (AI.isArrayAllocation()) {
        AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
        return &AI;
      }

      // Get the first instruction in the entry block.
      BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
      Instruction *FirstInst = EntryBlock.getFirstNonPHIOrDbg();
      if (FirstInst != &AI) {
        // If the entry block doesn't start with a zero-size alloca then move
        // this one to the start of the entry block. There is no problem with
        // dominance as the array size was forced to a constant earlier already.
        AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
        if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
            DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
          AI.moveBefore(FirstInst);
          return &AI;
        }

        // If the alignment of the entry block alloca is 0 (unspecified),
        // assign it the preferred alignment.
        if (EntryAI->getAlignment() == 0)
          EntryAI->setAlignment(
              DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
        // Replace this zero-sized alloca with the one at the start of the entry
        // block after ensuring that the address will be aligned enough for both
        // types.
        unsigned MaxAlign = std::max(EntryAI->getAlignment(),
                                     AI.getAlignment());
        EntryAI->setAlignment(MaxAlign);
        if (AI.getType() != EntryAI->getType())
          return new BitCastInst(EntryAI, AI.getType());
        return ReplaceInstUsesWith(AI, EntryAI);
      }
    }
  }

  if (AI.getAlignment()) {
    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global whose alignment is equal to or exceeds that of the
    // allocation. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    SmallVector<Instruction *, 4> ToDelete;
    if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
      unsigned SourceAlign = getOrEnforceKnownAlignment(
          Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
      if (AI.getAlignment() <= SourceAlign) {
        DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
        DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
        for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
          EraseInstFromFunction(*ToDelete[i]);
        Constant *TheSrc = cast<Constant>(Copy->getSource());
        Constant *Cast
          = ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
        Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
        EraseInstFromFunction(*Copy);
        ++NumGlobalCopies;
        return NewI;
      }
    }
  }

  // At last, use the generic allocation site handler to aggressively remove
  // allocations.
  return visitAllocSite(AI);
}

/// \brief Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type. This will convert it to a pointer, cast the operand to
/// that pointer type, load it, etc.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
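///
/// For example (a hypothetical sketch): with \c NewTy = double, a load such as
///   %v = load i64* %p
/// is rewritten to
///   %p.cast = bitcast i64* %p to double*
///   %v = load double* %p.cast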
static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI,
                                      Type *NewTy, const Twine &Suffix = "") {
  Value *Ptr = LI.getPointerOperand();
  unsigned AS = LI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  LI.getAllMetadata(MD);

  LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
      IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
      LI.getAlignment(), LI.getName() + Suffix);
  MDBuilder MDB(NewLoad->getContext());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a load instruction changing *only its type*.
    // The only metadata it makes sense to drop is metadata which is invalidated
    // when the pointer type changes. This should essentially never be the case
    // in LLVM, but we explicitly switch over only known metadata to be
    // conservatively correct. If you are adding metadata to LLVM which pertains
    // to loads, you almost certainly want to add it here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewLoad->setMetadata(ID, N);
      break;

    case LLVMContext::MD_nonnull:
      // This only directly applies if the new type is also a pointer.
      if (NewTy->isPointerTy()) {
        NewLoad->setMetadata(ID, N);
        break;
      }
      // If it's integral now, translate it to !range metadata.
      if (NewTy->isIntegerTy()) {
        auto *ITy = cast<IntegerType>(NewTy);
        auto *NullInt = ConstantExpr::getPtrToInt(
            ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
        auto *NonNullInt =
            ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
        NewLoad->setMetadata(LLVMContext::MD_range,
                             MDB.createRange(NonNullInt, NullInt));
      }
      break;

    case LLVMContext::MD_range:
      // FIXME: It would be nice to propagate this in some way, but the type
      // conversions make it hard. If the new type is a pointer, we could
      // translate it to !nonnull metadata.
      break;
    }
  }
  return NewLoad;
}

/// \brief Combine a store to a new type.
///
/// Returns the newly created store instruction.
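///
/// For example (a hypothetical sketch): storing a float value %v through an
/// i32* pointer %p becomes
///   %p.cast = bitcast i32* %p to float*
///   store float %v, float* %p.cast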
static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI,
                                         Value *V) {
  Value *Ptr = SI.getPointerOperand();
  unsigned AS = SI.getPointerAddressSpace();
  SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
  SI.getAllMetadata(MD);

  StoreInst *NewStore = IC.Builder->CreateAlignedStore(
      V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
      SI.getAlignment());
  for (const auto &MDPair : MD) {
    unsigned ID = MDPair.first;
    MDNode *N = MDPair.second;
    // Note, essentially every kind of metadata should be preserved here! This
    // routine is supposed to clone a store instruction changing *only its
    // type*. The only metadata it makes sense to drop is metadata which is
    // invalidated when the pointer type changes. This should essentially
    // never be the case in LLVM, but we explicitly switch over only known
    // metadata to be conservatively correct. If you are adding metadata to
    // LLVM which pertains to stores, you almost certainly want to add it
    // here.
    switch (ID) {
    case LLVMContext::MD_dbg:
    case LLVMContext::MD_tbaa:
    case LLVMContext::MD_prof:
    case LLVMContext::MD_fpmath:
    case LLVMContext::MD_tbaa_struct:
    case LLVMContext::MD_alias_scope:
    case LLVMContext::MD_noalias:
    case LLVMContext::MD_nontemporal:
    case LLVMContext::MD_mem_parallel_loop_access:
      // All of these directly apply.
      NewStore->setMetadata(ID, N);
      break;

    case LLVMContext::MD_invariant_load:
    case LLVMContext::MD_nonnull:
    case LLVMContext::MD_range:
      // These don't apply for stores.
      break;
    }
  }

  return NewStore;
}

/// \brief Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
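///
/// For example (a hypothetical sketch), a load whose only use is a bitcast of
/// the loaded value:
///   %x = load i32* %p
///   %f = bitcast i32 %x to float
/// is canonicalized to load the operation's type directly:
///   %p.cast = bitcast i32* %p to float*
///   %f = load float* %p.cast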
static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably handle both volatile and atomic loads here with
  // some care, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  if (LI.use_empty())
    return nullptr;

  Type *Ty = LI.getType();
  const DataLayout &DL = IC.getDataLayout();

  // Try to canonicalize loads which are only ever stored to operate over
  // integers instead of any other type. We only do this when the loaded type
  // is sized and has a size exactly the same as its store size and the store
  // size is a legal integer type.
  if (!Ty->isIntegerTy() && Ty->isSized() &&
      DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
      DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
    if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
          auto *SI = dyn_cast<StoreInst>(U);
          return SI && SI->getPointerOperand() != &LI;
        })) {
      LoadInst *NewLoad = combineLoadToNewType(
          IC, LI,
          Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
      // Replace all the stores with stores of the newly loaded value.
      for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
        auto *SI = cast<StoreInst>(*UI++);
        IC.Builder->SetInsertPoint(SI);
        combineStoreToNewValue(IC, *SI, NewLoad);
        IC.EraseInstFromFunction(*SI);
      }
      assert(LI.use_empty() && "Failed to remove all users of the load!");
      // Return the old load so the combiner can delete it safely.
      return &LI;
    }
  }

  // Fold away bit casts of the loaded value by loading the desired type.
  if (LI.hasOneUse())
    if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
      BC->replaceAllUsesWith(NewLoad);
      IC.EraseInstFromFunction(*BC);
      return &LI;
    }

  // FIXME: We should also canonicalize loads of vectors when their elements are
  // cast to other types.
  return nullptr;
}

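/// \brief Unpack a load of a single-element aggregate into a load of its
/// element.
///
/// For example (a hypothetical sketch), a load of a one-element struct:
///   %v = load { i32 }* %p
/// becomes a load of the element rebuilt into the aggregate:
///   %v.unpack = load i32* %p.cast
///   %v = insertvalue { i32 } undef, i32 %v.unpack, 0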
static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
  // FIXME: We could probably handle both volatile and atomic loads here with
  // some care, but it isn't clear that this is important.
  if (!LI.isSimple())
    return nullptr;

  Type *T = LI.getType();
  if (!T->isAggregateType())
    return nullptr;

  assert(LI.getAlignment() && "Alignment must be set at this point");

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    if (ST->getNumElements() == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
                                               ".unpack");
      return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, LI.getName()));
    }
  }

  if (auto *AT = dyn_cast<ArrayType>(T)) {
    // If the array only has one element, we unpack.
    if (AT->getNumElements() == 1) {
      LoadInst *NewLoad = combineLoadToNewType(IC, LI, AT->getElementType(),
                                               ".unpack");
      return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
        UndefValue::get(T), NewLoad, 0, LI.getName()));
    }
  }

  return nullptr;
}

// If we can determine that all possible objects pointed to by the provided
// pointer value are, not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true. Otherwise, return
// false (constant global values and allocas are examples of objects whose size
// can be determined this way).
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist(1, V);

  do {
    Value *P = Worklist.pop_back_val();
    P = P->stripPointerCasts();

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
      if (GA->mayBeOverridden())
        return false;
      Worklist.push_back(GA->getAliasee());
      continue;
    }

    // If we know how big this object is, and it is less than MaxSize, continue
    // searching. Otherwise, return false.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
      if (!AI->getAllocatedType()->isSized())
        return false;

      ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
      if (!CS)
        return false;

      uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
      // Make sure that, even if the multiplication below would wrap as a
      // uint64_t, we still do the right thing.
      if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
        return false;
      continue;
    }

    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
      if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
        return false;

      uint64_t InitSize = DL.getTypeAllocSize(GV->getType()->getElementType());
      if (InitSize > MaxSize)
        return false;
      continue;
    }

    return false;
  } while (!Worklist.empty());

  return true;
}

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
                                     Instruction *MemI, unsigned &Idx) {
  if (GEPI->getNumOperands() < 2)
    return false;

  // Find the first non-zero index of a GEP. If all indices are zero, return
  // one past the last index.
  auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
    unsigned I = 1;
    for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
      Value *V = GEPI->getOperand(I);
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
        if (CI->isZero())
          continue;

      break;
    }

    return I;
  };

  // Skip through initial 'zero' indices, and find the corresponding pointer
  // type. See if the next index is not a constant.
  Idx = FirstNZIdx(GEPI);
  if (Idx == GEPI->getNumOperands())
    return false;
  if (isa<Constant>(GEPI->getOperand(Idx)))
    return false;

  SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
  Type *AllocTy = GetElementPtrInst::getIndexedType(
      cast<PointerType>(GEPI->getOperand(0)->getType()->getScalarType())
          ->getElementType(),
      Ops);
  if (!AllocTy || !AllocTy->isSized())
    return false;
  const DataLayout &DL = IC.getDataLayout();
  uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);

  // If there are more indices after the one we might replace with a zero, make
  // sure they're all non-negative. If any of them are negative, the overall
  // address being computed might be before the base address determined by the
  // first non-zero index.
  auto IsAllNonNegative = [&]() {
    for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
      bool KnownNonNegative, KnownNegative;
      IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
                        KnownNegative, 0, MemI);
      if (KnownNonNegative)
        continue;
      return false;
    }

    return true;
  };

  // FIXME: If the GEP is not inbounds, and there are extra indices after the
  // one we'll replace, those could cause the address computation to wrap
  // (rendering the IsAllNonNegative() check below insufficient). We can do
  // better, ignoring zero indices (and other indices we can prove small
  // enough not to wrap).
  if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
    return false;

  // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
  // also known to be dereferenceable.
  return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
         IsAllNonNegative();
}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
template <typename T>
static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
                                          T &MemI) {
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
    unsigned Idx;
    if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
      Instruction *NewGEPI = GEPI->clone();
      NewGEPI->setOperand(Idx,
          ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
      NewGEPI->insertBefore(GEPI);
      MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
      return NewGEPI;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Try to canonicalize the loaded type.
  if (Instruction *Res = combineLoadToOperationType(*this, LI))
    return Res;

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
  unsigned LoadAlign = LI.getAlignment();
  unsigned EffectiveLoadAlign =
      LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

  if (KnownAlign > EffectiveLoadAlign)
    LI.setAlignment(KnownAlign);
  else if (LoadAlign == 0)
    LI.setAlignment(EffectiveLoadAlign);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
    Worklist.Add(NewGEPI);
    return &LI;
  }

  // None of the following transforms are legal for volatile/atomic loads.
  // FIXME: Some of it is okay for atomic loads; needs refactoring.
  if (!LI.isSimple()) return nullptr;

  if (Instruction *Res = unpackLoadToAggregate(*this, LI))
    return Res;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(
        LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
                                            LI.getName() + ".cast"));

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }

  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable. We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap! Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(2));
        return &LI;
      }

      // load (select (cond, P, null)) -> load P
      if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
          LI.getPointerAddressSpace() == 0) {
        LI.setOperand(0, SI->getOperand(1));
        return &LI;
      }
    }
  }
  return nullptr;
}

/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
/// where we can we should match the type of a store to the type of value being
/// stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
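///
/// For example (a hypothetical sketch), a store of a bitcast value:
///   %v = bitcast float %f to i32
///   store i32 %v, i32* %p
/// is rewritten to store the original type:
///   %p.cast = bitcast i32* %p to float*
///   store float %f, float* %p.cast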
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle both volatile and atomic stores here with
  // some care, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();

  // Fold away bit casts of the stored value by storing the original type.
  if (auto *BC = dyn_cast<BitCastInst>(V)) {
    V = BC->getOperand(0);
    combineStoreToNewValue(IC, SI, V);
    return true;
  }

  // FIXME: We should also canonicalize stores of vectors when their elements
  // are cast to other types.
  return false;
}

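/// \brief Unpack a store of a single-element aggregate into a store of its
/// element.
///
/// For example (a hypothetical sketch), a store of a one-element struct:
///   store { i32 } %agg, { i32 }* %p
/// becomes a store of the extracted element:
///   %agg.elt = extractvalue { i32 } %agg, 0
///   store i32 %agg.elt, i32* %p.cast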
static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
  // FIXME: We could probably handle both volatile and atomic stores here with
  // some care, but it isn't clear that this is important.
  if (!SI.isSimple())
    return false;

  Value *V = SI.getValueOperand();
  Type *T = V->getType();

  if (!T->isAggregateType())
    return false;

  if (auto *ST = dyn_cast<StructType>(T)) {
    // If the struct only has one element, we unpack.
    if (ST->getNumElements() == 1) {
      V = IC.Builder->CreateExtractValue(V, 0);
      combineStoreToNewValue(IC, SI, V);
      return true;
    }
  }

  return false;
}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // Try to canonicalize the stored type.
  if (combineStoreToValueType(*this, SI))
    return EraseInstFromFunction(SI);

  // Attempt to improve the alignment.
  unsigned KnownAlign = getOrEnforceKnownAlignment(
      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
  unsigned StoreAlign = SI.getAlignment();
  unsigned EffectiveStoreAlign =
      StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

  if (KnownAlign > EffectiveStoreAlign)
    SI.setAlignment(KnownAlign);
  else if (StoreAlign == 0)
    SI.setAlignment(EffectiveStoreAlign);

  // Try to canonicalize the stored type.
  if (unpackStoreToAggregate(*this, SI))
    return EraseInstFromFunction(SI);

  // Replace GEP indices if possible.
  if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
    Worklist.Add(NewGEPI);
    return &SI;
  }

  // Don't hack volatile/atomic stores.
  // FIXME: Some bits are legal for atomic stores; needs refactoring.
  if (!SI.isSimple()) return nullptr;

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (Ptr->hasOneUse()) {
    if (isa<AllocaInst>(Ptr))
      return EraseInstFromFunction(SI);
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (isa<AllocaInst>(GEP->getOperand(0))) {
        if (GEP->getOperand(0)->hasOneUse())
          return EraseInstFromFunction(SI);
      }
    }
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations. This
  // situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
                                                        SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop. However, if the loaded value is from
    // the pointer we're loading and is producing the pointer we're storing,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          LI->isSimple())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location. Stores before it
      // may not be dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return nullptr;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return nullptr;  // xform done!

  return nullptr;
}

/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
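/// In both cases the result is a store of a merged value in the successor
/// (a sketch; %storemerge is the name the code below actually uses for the
/// new phi, the rest is illustrative):
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, i32* %P
///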
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges. If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = nullptr;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;

  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = nullptr;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI==OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or is not the
    // right kind of store, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        !SI.isSameOperationAs(OtherStore))
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle. See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            !SI.isSameOperationAs(OtherStore))
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }

  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstInsertionPt();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   SI.isVolatile(),
                                   SI.getAlignment(),
                                   SI.getOrdering(),
                                   SI.getSynchScope());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // If the two stores had AA tags, merge them.
  AAMDNodes AATags;
  SI.getAAMetadata(AATags);
  if (AATags) {
    OtherStore->getAAMetadata(AATags, /* Merge = */ true);
    NewSI->setAAMetadata(AATags);
  }

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);

  return true;
}