//
//===----------------------------------------------------------------------===//
-#include "InstCombine.h"
-#include "llvm/ADT/Optional.h"
+#include "InstCombineInternal.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/IntrinsicInst.h"
-#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
-using namespace llvm::PatternMatch;
#define DEBUG_TYPE "instcombine"
continue;
}
- if (CallSite CS = I) {
+ if (auto CS = CallSite(I)) {
// If this is the function being called then we treat it like a load and
// ignore it.
if (CS.isCallee(&U))
continue;
+ unsigned DataOpNo = CS.getDataOperandNo(&U);
+ bool IsArgOperand = CS.isArgOperand(&U);
+
// Inalloca arguments are clobbered by the call.
- unsigned ArgNo = CS.getArgumentNo(&U);
- if (CS.isInAllocaArgument(ArgNo))
+ if (IsArgOperand && CS.isInAllocaArgument(DataOpNo))
return false;
// If this is a readonly/readnone call site, then we know it is just a
// load (but one that potentially returns the value itself), so we can
// ignore it if we know that the value isn't captured.
if (CS.onlyReadsMemory() &&
- (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo)))
+ (CS.getInstruction()->use_empty() || CS.doesNotCapture(DataOpNo)))
continue;
// If this is being passed as a byval argument, the caller is making a
// copy, so it is only a read of the alloca.
- if (CS.isByValArgument(ArgNo))
+ if (IsArgOperand && CS.isByValArgument(DataOpNo))
continue;
}
return nullptr;
}
-Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
- // Ensure that the alloca array size argument has type intptr_t, so that
- // any casting is exposed early.
- if (DL) {
- Type *IntPtrTy = DL->getIntPtrType(AI.getType());
- if (AI.getArraySize()->getType() != IntPtrTy) {
- Value *V = Builder->CreateIntCast(AI.getArraySize(),
- IntPtrTy, false);
- AI.setOperand(0, V);
- return &AI;
- }
+static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
+ // Check for array size of 1 (scalar allocation).
+ if (!AI.isArrayAllocation()) {
+ // i32 1 is the canonical array size for scalar allocations.
+ if (AI.getArraySize()->getType()->isIntegerTy(32))
+ return nullptr;
+
+ // Canonicalize it.
+ Value *V = IC.Builder->getInt32(1);
+ AI.setOperand(0, V);
+ return &AI;
}
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
- if (AI.isArrayAllocation()) { // Check C != 1
- if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
- Type *NewTy =
- ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
- AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
- New->setAlignment(AI.getAlignment());
-
- // Scan to the end of the allocation instructions, to skip over a block of
- // allocas if possible...also skip interleaved debug info
- //
- BasicBlock::iterator It = New;
- while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
-
- // Now that I is pointing to the first non-allocation-inst in the block,
- // insert our getelementptr instruction...
- //
- Type *IdxTy = DL
- ? DL->getIntPtrType(AI.getType())
- : Type::getInt64Ty(AI.getContext());
- Value *NullIdx = Constant::getNullValue(IdxTy);
- Value *Idx[2] = { NullIdx, NullIdx };
- Instruction *GEP =
+ if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
+ Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
+ AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName());
+ New->setAlignment(AI.getAlignment());
+
+ // Scan to the end of the allocation instructions, to skip over a block of
+ // allocas if possible...also skip interleaved debug info
+ //
+ BasicBlock::iterator It(New);
+ while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It))
+ ++It;
+
+ // Now that It is pointing to the first non-allocation-inst in the block,
+ // insert our getelementptr instruction...
+ //
+ Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());
+ Value *NullIdx = Constant::getNullValue(IdxTy);
+ Value *Idx[2] = {NullIdx, NullIdx};
+ Instruction *GEP =
GetElementPtrInst::CreateInBounds(New, Idx, New->getName() + ".sub");
- InsertNewInstBefore(GEP, *It);
+ IC.InsertNewInstBefore(GEP, *It);
- // Now make everything use the getelementptr instead of the original
- // allocation.
- return ReplaceInstUsesWith(AI, GEP);
- } else if (isa<UndefValue>(AI.getArraySize())) {
- return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
- }
+ // Now make everything use the getelementptr instead of the original
+ // allocation.
+ return IC.ReplaceInstUsesWith(AI, GEP);
}
- if (DL && AI.getAllocatedType()->isSized()) {
+ if (isa<UndefValue>(AI.getArraySize()))
+ return IC.ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
+
+ // Ensure that the alloca array size argument has type intptr_t, so that
+ // any casting is exposed early.
+ Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
+ if (AI.getArraySize()->getType() != IntPtrTy) {
+ Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
+ AI.setOperand(0, V);
+ return &AI;
+ }
+
+ return nullptr;
+}
+
+Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
+ if (auto *I = simplifyAllocaArraySize(*this, AI))
+ return I;
+
+ if (AI.getAllocatedType()->isSized()) {
// If the alignment is 0 (unspecified), assign it the preferred alignment.
if (AI.getAlignment() == 0)
- AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));
+ AI.setAlignment(DL.getPrefTypeAlignment(AI.getAllocatedType()));
// Move all alloca's of zero byte objects to the entry block and merge them
// together. Note that we only do this for alloca's, because malloc should
// allocate and return a unique pointer, even for a zero byte allocation.
- if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
+ if (DL.getTypeAllocSize(AI.getAllocatedType()) == 0) {
// For a zero sized alloca there is no point in doing an array allocation.
// This is helpful if the array size is a complicated expression not used
// elsewhere.
// dominance as the array size was forced to a constant earlier already.
AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
- DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
+ DL.getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
AI.moveBefore(FirstInst);
return &AI;
}
// assign it the preferred alignment.
if (EntryAI->getAlignment() == 0)
EntryAI->setAlignment(
- DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
+ DL.getPrefTypeAlignment(EntryAI->getAllocatedType()));
// Replace this zero-sized alloca with the one at the start of the entry
// block after ensuring that the address will be aligned enough for both
// types.
// is only subsequently read.
SmallVector<Instruction *, 4> ToDelete;
if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
- unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
- AI.getAlignment(),
- DL, AT, &AI, DT);
+ unsigned SourceAlign = getOrEnforceKnownAlignment(
+ Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
if (AI.getAlignment() <= SourceAlign) {
DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombiner currently is using.
-static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy) {
+static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
+ const Twine &Suffix = "") {
Value *Ptr = LI.getPointerOperand();
unsigned AS = LI.getPointerAddressSpace();
SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
LoadInst *NewLoad = IC.Builder->CreateAlignedLoad(
IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
- LI.getAlignment(), LI.getName());
+ LI.getAlignment(), LI.getName() + Suffix);
+ MDBuilder MDB(NewLoad->getContext());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
MDNode *N = MDPair.second;
case LLVMContext::MD_noalias:
case LLVMContext::MD_nontemporal:
case LLVMContext::MD_mem_parallel_loop_access:
- case LLVMContext::MD_nonnull:
// All of these directly apply.
NewLoad->setMetadata(ID, N);
break;
+ case LLVMContext::MD_nonnull:
+ // This only directly applies if the new type is also a pointer.
+ if (NewTy->isPointerTy()) {
+ NewLoad->setMetadata(ID, N);
+ break;
+ }
+ // If it's integral now, translate it to !range metadata.
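+ // E.g. on typical targets where null is address zero, a pointer load with
+ // !nonnull that is rewritten to load i64 gets !range !{i64 1, i64 0}, a
+ // wrapping range that excludes only zero.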
+ if (NewTy->isIntegerTy()) {
+ auto *ITy = cast<IntegerType>(NewTy);
+ auto *NullInt = ConstantExpr::getPtrToInt(
+ ConstantPointerNull::get(cast<PointerType>(Ptr->getType())), ITy);
+ auto *NonNullInt =
+ ConstantExpr::getAdd(NullInt, ConstantInt::get(ITy, 1));
+ NewLoad->setMetadata(LLVMContext::MD_range,
+ MDB.createRange(NonNullInt, NullInt));
+ }
+ break;
+ case LLVMContext::MD_align:
+ case LLVMContext::MD_dereferenceable:
+ case LLVMContext::MD_dereferenceable_or_null:
+ // These only directly apply if the new type is also a pointer.
+ if (NewTy->isPointerTy())
+ NewLoad->setMetadata(ID, N);
+ break;
case LLVMContext::MD_range:
// FIXME: It would be nice to propagate this in some way, but the type
- // conversions make it hard.
+ // conversions make it hard. If the new type is a pointer, we could
+ // translate it to !nonnull metadata.
break;
}
}
return NewLoad;
}
-/// \brief Combine integer loads to vector stores when the integers bits are
-/// just a concatenation of non-integer (and non-vector) types.
+/// \brief Combine a store to a new type.
///
-/// This specifically matches the pattern of loading an integer, right-shifting,
-/// trucating, and casting it to a non-integer type. When the shift is an exact
-/// multiple of the result non-integer type's size, this is more naturally
-/// expressed as a load of a vector and an extractelement. This shows up largely
-/// because large integers are sometimes used to represent a "generic" load or
-/// store, and only later optimization may uncover that there is a more natural
-/// type to represent the load with.
-static Instruction *combineIntegerLoadToVectorLoad(InstCombiner &IC,
- LoadInst &LI) {
- // FIXME: This is probably a reasonable transform to make for atomic stores.
- assert(LI.isSimple() && "Do not call for non-simple stores!");
-
- const DataLayout &DL = *IC.getDataLayout();
- unsigned BaseBits = LI.getType()->getIntegerBitWidth();
- Type *ElementTy = nullptr;
- int ElementSize;
-
- // We match any number of element extractions from the loaded integer. Each of
- // these should be RAUW'ed with an actual extract element instruction at the
- // given index of a loaded vector.
- struct ExtractedElement {
- Instruction *Element;
- int Index;
- };
- SmallVector<ExtractedElement, 2> Elements;
-
- // Lambda to match the bit cast in the extracted element (which is the root
- // pattern matched). Accepts the instruction and shifted bits, returns false
- // if at any point we failed to match a suitable bitcast for element
- // extraction.
- auto MatchCast = [&](Instruction *I, unsigned ShiftBits) {
- // The truncate must be casted to some element type. This cast can only be
- // a bitcast or an inttoptr cast which is the same size.
- if (!isa<BitCastInst>(I)) {
- if (auto *PC = dyn_cast<IntToPtrInst>(I)) {
- // Ensure that the pointer and integer have the exact same size.
- if (PC->getOperand(0)->getType()->getIntegerBitWidth() !=
- DL.getTypeSizeInBits(PC->getType()))
- return false;
- } else {
- // We only support bitcast and inttoptr.
- return false;
- }
- }
-
- // All of the elements inserted need to be the same type. Either capture the
- // first element type or check this element type against the previous
- // element types.
- if (!ElementTy) {
- ElementTy = I->getType();
- // We don't handle integers, sub-vectors, or any aggregate types. We
- // handle pointers and floating ponit types.
- if (!ElementTy->isSingleValueType() || ElementTy->isIntegerTy() ||
- ElementTy->isVectorTy())
- return false;
-
- ElementSize = DL.getTypeSizeInBits(ElementTy);
- // The base integer size and the shift need to be multiples of the element
- // size in bits.
- if (BaseBits % ElementSize || ShiftBits % ElementSize)
- return false;
- } else if (ElementTy != I->getType()) {
- return false;
- }
-
- // Compute the vector index and store the element with it.
- int Index =
- (DL.isLittleEndian() ? ShiftBits : BaseBits - ElementSize - ShiftBits) /
- ElementSize;
- ExtractedElement E = {I, Index};
- Elements.push_back(std::move(E));
- return true;
- };
-
- // Lambda to match the truncate in the extracted element. Accepts the
- // instruction and shifted bits. Returns false if at any point we failed to
- // match a suitable truncate for element extraction.
- auto MatchTruncate = [&](Instruction *I, unsigned ShiftBits) {
- // Handle the truncate to the bit size of the element.
- auto *T = dyn_cast<TruncInst>(I);
- if (!T)
- return false;
-
- // Walk all the users of the truncate, whuch must all be bitcasts.
- for (User *TU : T->users())
- if (!MatchCast(cast<Instruction>(TU), ShiftBits))
- return false;
- return true;
- };
+/// Returns the newly created store instruction.
+static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V) {
+ Value *Ptr = SI.getPointerOperand();
+ unsigned AS = SI.getPointerAddressSpace();
+ SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
+ SI.getAllMetadata(MD);
- for (User *U : LI.users()) {
- Instruction *I = cast<Instruction>(U);
+ StoreInst *NewStore = IC.Builder->CreateAlignedStore(
+ V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
+ SI.getAlignment());
+ for (const auto &MDPair : MD) {
+ unsigned ID = MDPair.first;
+ MDNode *N = MDPair.second;
+ // Note, essentially every kind of metadata should be preserved here! This
+ // routine is supposed to clone a store instruction changing *only its
+ // type*. The only metadata it makes sense to drop is metadata which is
+ // invalidated when the pointer type changes. This should essentially
+ // never be the case in LLVM, but we explicitly switch over only known
+ // metadata to be conservatively correct. If you are adding metadata to
+ // LLVM which pertains to stores, you almost certainly want to add it
+ // here.
+ switch (ID) {
+ case LLVMContext::MD_dbg:
+ case LLVMContext::MD_tbaa:
+ case LLVMContext::MD_prof:
+ case LLVMContext::MD_fpmath:
+ case LLVMContext::MD_tbaa_struct:
+ case LLVMContext::MD_alias_scope:
+ case LLVMContext::MD_noalias:
+ case LLVMContext::MD_nontemporal:
+ case LLVMContext::MD_mem_parallel_loop_access:
+ // All of these directly apply.
+ NewStore->setMetadata(ID, N);
+ break;
- // Strip off a logical shift right and retain the shifted amount.
- ConstantInt *ShiftC;
- if (!match(I, m_LShr(m_Value(), m_ConstantInt(ShiftC)))) {
- // This must be a direct truncate.
- if (!MatchTruncate(I, 0))
- return nullptr;
- continue;
+ case LLVMContext::MD_invariant_load:
+ case LLVMContext::MD_nonnull:
+ case LLVMContext::MD_range:
+ case LLVMContext::MD_align:
+ case LLVMContext::MD_dereferenceable:
+ case LLVMContext::MD_dereferenceable_or_null:
+ // These don't apply for stores.
+ break;
}
-
- unsigned ShiftBits = ShiftC->getLimitedValue(BaseBits);
- // We can't handle shifts of more than the number of bits in the integer.
- if (ShiftBits == BaseBits)
- return nullptr;
-
- // Match all the element extraction users of the shift.
- for (User *IU : I->users())
- if (!MatchTruncate(cast<Instruction>(IU), ShiftBits))
- return nullptr;
}
- // If didn't find any extracted elements, there is nothing to do here.
- if (Elements.empty())
- return nullptr;
-
- // Create a vector load and rewrite all of the elements extracted as
- // extractelement instructions.
- VectorType *VTy = VectorType::get(ElementTy, BaseBits / ElementSize);
- LoadInst *NewLI = combineLoadToNewType(IC, LI, VTy);
-
- for (const auto &E : Elements) {
- IC.Builder->SetInsertPoint(E.Element);
- E.Element->replaceAllUsesWith(
- IC.Builder->CreateExtractElement(NewLI, IC.Builder->getInt32(E.Index)));
- IC.EraseInstFromFunction(*E.Element);
- }
-
- // Return the original load to indicate it has been combined away.
- return &LI;
+ return NewStore;
}
/// \brief Combine loads to match the type of value their uses after looking
if (LI.use_empty())
return nullptr;
+ Type *Ty = LI.getType();
+ const DataLayout &DL = IC.getDataLayout();
+
+ // Try to canonicalize loads which are only ever stored to operate over
+ // integers instead of any other type. We only do this when the loaded type
+ // is sized and has a size exactly the same as its store size and the store
+ // size is a legal integer type.
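+ // For example, on a target where i32 is a legal integer type, a float load
+ // used only by stores of the loaded value is rewritten as an i32 load
+ // feeding i32 stores.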
+ if (!Ty->isIntegerTy() && Ty->isSized() &&
+ DL.isLegalInteger(DL.getTypeStoreSizeInBits(Ty)) &&
+ DL.getTypeStoreSizeInBits(Ty) == DL.getTypeSizeInBits(Ty)) {
+ if (std::all_of(LI.user_begin(), LI.user_end(), [&LI](User *U) {
+ auto *SI = dyn_cast<StoreInst>(U);
+ return SI && SI->getPointerOperand() != &LI;
+ })) {
+ LoadInst *NewLoad = combineLoadToNewType(
+ IC, LI,
+ Type::getIntNTy(LI.getContext(), DL.getTypeStoreSizeInBits(Ty)));
+ // Replace all the stores with stores of the newly loaded value.
+ for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
+ auto *SI = cast<StoreInst>(*UI++);
+ IC.Builder->SetInsertPoint(SI);
+ combineStoreToNewValue(IC, *SI, NewLoad);
+ IC.EraseInstFromFunction(*SI);
+ }
+ assert(LI.use_empty() && "Failed to remove all users of the load!");
+ // Return the old load so the combiner can delete it safely.
+ return &LI;
+ }
+ }
// Fold away bit casts of the loaded value by loading the desired type.
+ // We can do this for BitCastInsts as well as casts from and to pointer types,
+ // as long as those are noops (i.e., the source or dest type has the same
+ // bitwidth as the target's pointers).
+ if (LI.hasOneUse())
+ if (auto* CI = dyn_cast<CastInst>(LI.user_back())) {
+ if (CI->isNoopCast(DL)) {
+ LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
+ CI->replaceAllUsesWith(NewLoad);
+ IC.EraseInstFromFunction(*CI);
+ return &LI;
+ }
+ }
+
// FIXME: We should also canonicalize loads of vectors when their elements are
// cast to other types.
- if (LI.hasOneUse())
- if (auto *BC = dyn_cast<BitCastInst>(LI.user_back())) {
- LoadInst *NewLoad = combineLoadToNewType(IC, LI, BC->getDestTy());
- BC->replaceAllUsesWith(NewLoad);
- IC.EraseInstFromFunction(*BC);
- return &LI;
+ return nullptr;
+}
+
+static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
+ // FIXME: We could probably with some care handle both volatile and atomic
+ // loads here but it isn't clear that this is important.
+ if (!LI.isSimple())
+ return nullptr;
+
+ Type *T = LI.getType();
+ if (!T->isAggregateType())
+ return nullptr;
+
+ assert(LI.getAlignment() && "Alignment must be set at this point");
+
+ if (auto *ST = dyn_cast<StructType>(T)) {
+ // If the struct has only one element, we unpack it.
+ unsigned Count = ST->getNumElements();
+ if (Count == 1) {
+ LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
+ ".unpack");
+ return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
+ UndefValue::get(T), NewLoad, 0, LI.getName()));
+ }
+
+ // We don't want to break loads with padding here as we'd lose
+ // the knowledge that padding exists for the rest of the pipeline.
+ const DataLayout &DL = IC.getDataLayout();
+ auto *SL = DL.getStructLayout(ST);
+ if (SL->hasPadding())
+ return nullptr;
+
+ auto Name = LI.getName();
+ SmallString<16> LoadName = Name;
+ LoadName += ".unpack";
+ SmallString<16> EltName = Name;
+ EltName += ".elt";
+ auto *Addr = LI.getPointerOperand();
+ Value *V = UndefValue::get(T);
+ auto *IdxType = Type::getInt32Ty(ST->getContext());
+ auto *Zero = ConstantInt::get(IdxType, 0);
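+ // Load each field through a GEP to element i and rebuild the aggregate
+ // value with a chain of insertvalue instructions.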
+ for (unsigned i = 0; i < Count; i++) {
+ Value *Indices[2] = {
+ Zero,
+ ConstantInt::get(IdxType, i),
+ };
+ auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), EltName);
+ auto *L = IC.Builder->CreateAlignedLoad(Ptr, LI.getAlignment(),
+ LoadName);
+ V = IC.Builder->CreateInsertValue(V, L, i);
}
- // Try to combine integer loads into vector loads when the integer is just
- // loading a bag of bits that are casted into vector element chunks.
- if (LI.getType()->isIntegerTy())
- if (Instruction *R = combineIntegerLoadToVectorLoad(IC, LI))
- return R;
+ V->setName(Name);
+ return IC.ReplaceInstUsesWith(LI, V);
+ }
+
+ if (auto *AT = dyn_cast<ArrayType>(T)) {
+ // If the array has only one element, we unpack it.
+ if (AT->getNumElements() == 1) {
+ LoadInst *NewLoad = combineLoadToNewType(IC, LI, AT->getElementType(),
+ ".unpack");
+ return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
+ UndefValue::get(T), NewLoad, 0, LI.getName()));
+ }
+ }
+
+ return nullptr;
+}
+
+// If we can determine that all possible objects pointed to by the provided
+// pointer value are not only dereferenceable but also definitively less than
+// or equal to the provided maximum size, then return true. Otherwise, return
+// false. (Constant global values and allocas are examples of objects whose
+// size can be determined this way.)
+//
+// FIXME: This should probably live in ValueTracking (or similar).
+static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
+ const DataLayout &DL) {
+ SmallPtrSet<Value *, 4> Visited;
+ SmallVector<Value *, 4> Worklist(1, V);
+
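+ // Walk through selects, PHIs, pointer casts and aliases so that every
+ // possible underlying object gets checked against MaxSize.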
+ do {
+ Value *P = Worklist.pop_back_val();
+ P = P->stripPointerCasts();
+
+ if (!Visited.insert(P).second)
+ continue;
+
+ if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
+ Worklist.push_back(SI->getTrueValue());
+ Worklist.push_back(SI->getFalseValue());
+ continue;
+ }
+
+ if (PHINode *PN = dyn_cast<PHINode>(P)) {
+ for (Value *IncValue : PN->incoming_values())
+ Worklist.push_back(IncValue);
+ continue;
+ }
+
+ if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
+ if (GA->mayBeOverridden())
+ return false;
+ Worklist.push_back(GA->getAliasee());
+ continue;
+ }
+
+ // If we know how big this object is, and it is less than MaxSize, continue
+ // searching. Otherwise, return false.
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
+ if (!AI->getAllocatedType()->isSized())
+ return false;
+
+ ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
+ if (!CS)
+ return false;
+
+ uint64_t TypeSize = DL.getTypeAllocSize(AI->getAllocatedType());
+ // Make sure that, even if the multiplication below would wrap as a
+ // uint64_t, we still do the right thing.
+ if ((CS->getValue().zextOrSelf(128)*APInt(128, TypeSize)).ugt(MaxSize))
+ return false;
+ continue;
+ }
+
+ if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
+ if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
+ return false;
+
+ uint64_t InitSize = DL.getTypeAllocSize(GV->getType()->getElementType());
+ if (InitSize > MaxSize)
+ return false;
+ continue;
+ }
+
+ return false;
+ } while (!Worklist.empty());
+
+ return true;
+}
+
+// If we're indexing into an object of a known size, and the outer index is
+// not a constant, but having any value but zero would lead to undefined
+// behavior, replace it with zero.
+//
+// For example, if we have:
+// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
+// ...
+// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
+// ... = load i32* %arrayidx, align 4
+// Then we know that we can replace %x in the GEP with i64 0.
+//
+// FIXME: We could fold any GEP index to zero that would cause UB if it were
+// not zero. Currently, we only handle the first such index. Also, we could
+// search through non-zero constant indices if we kept track of the offsets
+// those indices implied.
+static bool canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI,
+ Instruction *MemI, unsigned &Idx) {
+ if (GEPI->getNumOperands() < 2)
+ return false;
+
+ // Find the first non-zero index of a GEP. If all indices are zero, return
+ // one past the last index.
+ auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
+ unsigned I = 1;
+ for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
+ Value *V = GEPI->getOperand(I);
+ if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
+ if (CI->isZero())
+ continue;
+
+ break;
+ }
+
+ return I;
+ };
+
+ // Skip through initial 'zero' indices, and find the corresponding pointer
+ // type. See if the next index is not a constant.
+ Idx = FirstNZIdx(GEPI);
+ if (Idx == GEPI->getNumOperands())
+ return false;
+ if (isa<Constant>(GEPI->getOperand(Idx)))
+ return false;
+
+ SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
+ Type *AllocTy = GetElementPtrInst::getIndexedType(
+ cast<PointerType>(GEPI->getOperand(0)->getType()->getScalarType())
+ ->getElementType(),
+ Ops);
+ if (!AllocTy || !AllocTy->isSized())
+ return false;
+ const DataLayout &DL = IC.getDataLayout();
+ uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy);
+
+ // If there are more indices after the one we might replace with a zero, make
+ // sure they're all non-negative. If any of them are negative, the overall
+ // address being computed might be before the base address determined by the
+ // first non-zero index.
+ auto IsAllNonNegative = [&]() {
+ for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
+ bool KnownNonNegative, KnownNegative;
+ IC.ComputeSignBit(GEPI->getOperand(i), KnownNonNegative,
+ KnownNegative, 0, MemI);
+ if (KnownNonNegative)
+ continue;
+ return false;
+ }
+
+ return true;
+ };
+
+ // FIXME: If the GEP is not inbounds, and there are extra indices after the
+ // one we'll replace, those could cause the address computation to wrap
+ // (rendering the IsAllNonNegative() check below insufficient). We can do
+ // better, ignoring zero indices (and other indices we can prove small
+ // enough not to wrap).
+ if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
+ return false;
+
+ // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
+ // also known to be dereferenceable.
+ return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
+ IsAllNonNegative();
+}
+
+// If we're indexing into an object with a variable index for the memory
+// access, but the object has only one element, we can assume that the index
+// will always be zero. If we replace the GEP, return it.
+template <typename T>
+static Instruction *replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr,
+ T &MemI) {
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
+ unsigned Idx;
+ if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
+ Instruction *NewGEPI = GEPI->clone();
+ NewGEPI->setOperand(Idx,
+ ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
+ NewGEPI->insertBefore(GEPI);
+ MemI.setOperand(MemI.getPointerOperandIndex(), NewGEPI);
+ return NewGEPI;
+ }
+ }
return nullptr;
}
return Res;
// Attempt to improve the alignment.
- if (DL) {
- unsigned KnownAlign =
- getOrEnforceKnownAlignment(Op, DL->getPrefTypeAlignment(LI.getType()),
- DL, AT, &LI, DT);
- unsigned LoadAlign = LI.getAlignment();
- unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
- DL->getABITypeAlignment(LI.getType());
-
- if (KnownAlign > EffectiveLoadAlign)
- LI.setAlignment(KnownAlign);
- else if (LoadAlign == 0)
- LI.setAlignment(EffectiveLoadAlign);
+ unsigned KnownAlign = getOrEnforceKnownAlignment(
+ Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
+ unsigned LoadAlign = LI.getAlignment();
+ unsigned EffectiveLoadAlign =
+ LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
+
+ if (KnownAlign > EffectiveLoadAlign)
+ LI.setAlignment(KnownAlign);
+ else if (LoadAlign == 0)
+ LI.setAlignment(EffectiveLoadAlign);
+
+ // Replace GEP indices if possible.
+ if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI)) {
+ Worklist.Add(NewGEPI);
+ return &LI;
}
// None of the following transforms are legal for volatile/atomic loads.
// FIXME: Some of it is okay for atomic loads; needs refactoring.
if (!LI.isSimple()) return nullptr;
+ if (Instruction *Res = unpackLoadToAggregate(*this, LI))
+ return Res;
+
// Do really simple store-to-load forwarding and load CSE, to catch cases
// where there are several consecutive memory accesses to the same location,
// separated by a few arithmetic operations.
- BasicBlock::iterator BBI = &LI;
- if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
+ BasicBlock::iterator BBI(LI);
+ AAMDNodes AATags;
+ if (Value *AvailableVal =
+ FindAvailableLoadedValue(Op, LI.getParent(), BBI,
+ DefMaxInstsToScan, AA, &AATags)) {
+ if (LoadInst *NLI = dyn_cast<LoadInst>(AvailableVal)) {
+ unsigned KnownIDs[] = {
+ LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
+ LLVMContext::MD_noalias, LLVMContext::MD_range,
+ LLVMContext::MD_invariant_load, LLVMContext::MD_nonnull,
+ LLVMContext::MD_invariant_group, LLVMContext::MD_align,
+ LLVMContext::MD_dereferenceable,
+ LLVMContext::MD_dereferenceable_or_null};
+ combineMetadata(NLI, &LI, KnownIDs);
+ }
+
return ReplaceInstUsesWith(
LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
LI.getName() + ".cast"));
+ }
// load(gep null, ...) -> unreachable
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
// load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
unsigned Align = LI.getAlignment();
- if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
- isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
+ if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align) &&
+ isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align)) {
LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
SI->getOperand(1)->getName()+".val");
LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
}
// load (select (cond, null, P)) -> load P
- if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
- if (C->isNullValue()) {
- LI.setOperand(0, SI->getOperand(2));
- return &LI;
- }
+ if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
+ LI.getPointerAddressSpace() == 0) {
+ LI.setOperand(0, SI->getOperand(2));
+ return &LI;
+ }
// load (select (cond, P, null)) -> load P
- if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
- if (C->isNullValue()) {
- LI.setOperand(0, SI->getOperand(1));
- return &LI;
- }
+ if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
+ LI.getPointerAddressSpace() == 0) {
+ LI.setOperand(0, SI->getOperand(1));
+ return &LI;
+ }
}
}
return nullptr;
}
-/// \brief Helper to combine a store to use a new value.
-///
-/// This just does the work of combining a store to use a new value, potentially
-/// of a different type. It handles metadata, etc., and returns the new
-/// instruction. The new value is stored to a bitcast of the pointer argument to
-/// the original store.
-///
-/// Note that this will create the instructions with whatever insert point the
-/// \c InstCombiner currently is using.
-static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &OldSI,
- Value *V) {
- Value *Ptr = OldSI.getPointerOperand();
- unsigned AS = OldSI.getPointerAddressSpace();
- SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
- OldSI.getAllMetadata(MD);
-
- StoreInst *NewSI = IC.Builder->CreateAlignedStore(
- V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
- OldSI.getAlignment());
- for (const auto &MDPair : MD) {
- unsigned ID = MDPair.first;
- MDNode *N = MDPair.second;
- // Note, essentially every kind of metadata should be preserved here! This
- // routine is supposed to clone a store instruction changing *only its
- // type*. The only metadata it makes sense to drop is metadata which is
- // invalidated when the pointer type changes. This should essentially
- // never be the case in LLVM, but we explicitly switch over only known
- // metadata to be conservatively correct. If you are adding metadata to
- // LLVM which pertains to stores, you almost certainly want to add it
- // here.
- switch (ID) {
- case LLVMContext::MD_dbg:
- case LLVMContext::MD_tbaa:
- case LLVMContext::MD_prof:
- case LLVMContext::MD_fpmath:
- case LLVMContext::MD_tbaa_struct:
- case LLVMContext::MD_alias_scope:
- case LLVMContext::MD_noalias:
- case LLVMContext::MD_nontemporal:
- case LLVMContext::MD_mem_parallel_loop_access:
- case LLVMContext::MD_nonnull:
- // All of these directly apply.
- NewSI->setMetadata(ID, N);
- break;
-
- case LLVMContext::MD_invariant_load:
- case LLVMContext::MD_range:
- break;
- }
- }
- return NewSI;
-}
-
-/// \brief Combine integer stores to vector stores when the integers bits are
-/// just a concatenation of non-integer (and non-vector) types.
-///
-/// This specifically matches the pattern of taking a sequence of non-integer
-/// types, casting them to integers, extending, shifting, and or-ing them
-/// together to make a concatenation, and then storing the result. This shows up
-/// because large integers are sometimes used to represent a "generic" load or
-/// store, and only later optimization may uncover that there is a more natural
-/// type to represent the store with.
-///
-/// \returns true if the store was successfully combined away. This indicates
-/// the caller must erase the store instruction. We have to let the caller erase
-/// the store instruction sas otherwise there is no way to signal whether it was
-/// combined or not: IC.EraseInstFromFunction returns a null pointer.
-static bool combineIntegerStoreToVectorStore(InstCombiner &IC, StoreInst &SI) {
- // FIXME: This is probably a reasonable transform to make for atomic stores.
- assert(SI.isSimple() && "Do not call for non-simple stores!");
-
- Instruction *OrigV = dyn_cast<Instruction>(SI.getValueOperand());
- if (!OrigV)
- return false;
-
- // We only handle values which are used entirely to store to memory. If the
- // value is used directly as an SSA value, then even if there are matching
- // element insertion and element extraction, we rely on basic integer
- // combining to forward the bits and delete the intermediate math. Here we
- // just need to clean up the places where it actually reaches memory.
- SmallVector<StoreInst *, 2> Stores;
- for (User *U : OrigV->users())
- if (auto *SIU = dyn_cast<StoreInst>(U))
- Stores.push_back(SIU);
- else
- return false;
-
- const DataLayout &DL = *IC.getDataLayout();
- unsigned BaseBits = OrigV->getType()->getIntegerBitWidth();
- Type *ElementTy = nullptr;
- int ElementSize;
-
- // We need to match some number of element insertions into an integer. Each
- // insertion takes the form of an element value (and type), index (multiple of
- // the bitwidth of the type) of insertion, and the base it was inserted into.
- struct InsertedElement {
- Value *Base;
- Value *Element;
- int Index;
- };
- auto MatchInsertedElement = [&](Value *V) -> Optional<InsertedElement> {
- // Handle a null input to make it easy to loop over bases.
- if (!V)
- return Optional<InsertedElement>();
-
- assert(!V->getType()->isVectorTy() && "Must not be a vector.");
- assert(V->getType()->isIntegerTy() && "Must be an integer value.");
-
- Value *Base = nullptr, *Cast;
- ConstantInt *ShiftC = nullptr;
- auto InsertPattern = m_CombineOr(
- m_Shl(m_OneUse(m_ZExt(m_OneUse(m_Value(Cast)))), m_ConstantInt(ShiftC)),
- m_ZExt(m_OneUse(m_Value(Cast))));
- if (!match(V, m_CombineOr(m_CombineOr(m_Or(m_OneUse(m_Value(Base)),
- m_OneUse(InsertPattern)),
- m_Or(m_OneUse(InsertPattern),
- m_OneUse(m_Value(Base)))),
- InsertPattern)))
- return Optional<InsertedElement>();
-
- Value *Element;
- if (auto *BC = dyn_cast<BitCastInst>(Cast)) {
- // Bit casts are trivially correct here.
- Element = BC->getOperand(0);
- } else if (auto *PC = dyn_cast<PtrToIntInst>(Cast)) {
- Element = PC->getOperand(0);
- // If this changes the bit width at all, reject it.
- if (PC->getType()->getIntegerBitWidth() !=
- DL.getTypeSizeInBits(Element->getType()))
- return Optional<InsertedElement>();
- } else {
- // All other casts are rejected.
- return Optional<InsertedElement>();
- }
-
- // We can't handle shifts wider than the number of bits in the integer.
- unsigned ShiftBits = ShiftC ? ShiftC->getLimitedValue(BaseBits) : 0;
- if (ShiftBits == BaseBits)
- return Optional<InsertedElement>();
-
- // All of the elements inserted need to be the same type. Either capture the
- // first element type or check this element type against the previous
- // element types.
- if (!ElementTy) {
- ElementTy = Element->getType();
- // The base integer size and the shift need to be multiples of the element
- // size in bits.
- ElementSize = DL.getTypeSizeInBits(ElementTy);
- if (BaseBits % ElementSize || ShiftBits % ElementSize)
- return Optional<InsertedElement>();
- } else if (ElementTy != Element->getType()) {
- return Optional<InsertedElement>();
- }
-
- // We don't handle integers, sub-vectors, or any aggregate types. We
- // handle pointers and floating ponit types.
- if (!ElementTy->isSingleValueType() || ElementTy->isIntegerTy() ||
- ElementTy->isVectorTy())
- return Optional<InsertedElement>();
-
- int Index =
- (DL.isLittleEndian() ? ShiftBits : BaseBits - ElementSize - ShiftBits) /
- ElementSize;
- InsertedElement Result = {Base, Element, Index};
- return Result;
- };
-
- SmallVector<InsertedElement, 2> Elements;
- Value *V = OrigV;
- while (Optional<InsertedElement> E = MatchInsertedElement(V)) {
- V = E->Base;
- Elements.push_back(std::move(*E));
- }
- // If searching for elements found none, or didn't terminate in either an
- // undef or a direct zext, we can't form a vector.
- if (Elements.empty() || (V && !isa<UndefValue>(V)))
- return false;
-
- // Build a storable vector by looping over the inserted elements.
- VectorType *VTy = VectorType::get(ElementTy, BaseBits / ElementSize);
- V = UndefValue::get(VTy);
- IC.Builder->SetInsertPoint(OrigV);
- for (const auto &E : Elements)
- V = IC.Builder->CreateInsertElement(V, E.Element,
- IC.Builder->getInt32(E.Index));
-
- for (StoreInst *OldSI : Stores) {
- IC.Builder->SetInsertPoint(OldSI);
- combineStoreToNewValue(IC, *OldSI, V);
- if (OldSI != &SI)
- IC.EraseInstFromFunction(*OldSI);
- }
- return true;
-}
-
/// \brief Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
-/// the store instruction sas otherwise there is no way to signal whether it was
+/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
// FIXME: We could probably with some care handle both volatile and atomic
// Fold away bit casts of the stored value by storing the original type.
if (auto *BC = dyn_cast<BitCastInst>(V)) {
- combineStoreToNewValue(IC, SI, BC->getOperand(0));
+ V = BC->getOperand(0);
+ combineStoreToNewValue(IC, SI, V);
return true;
}
- // If this is an integer store and we have data layout, look for a pattern of
- // storing a vector as an integer (modeled as a bag of bits).
- if (V->getType()->isIntegerTy() && IC.getDataLayout() &&
- combineIntegerStoreToVectorStore(IC, SI))
- return true;
-
// FIXME: We should also canonicalize loads of vectors when their elements are
// cast to other types.
return false;
}
+static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
+ // FIXME: We could probably with some care handle both volatile and atomic
+ // stores here but it isn't clear that this is important.
+ if (!SI.isSimple())
+ return false;
+
+ Value *V = SI.getValueOperand();
+ Type *T = V->getType();
+
+ if (!T->isAggregateType())
+ return false;
+
+ if (auto *ST = dyn_cast<StructType>(T)) {
+ // If the struct has only one element, we unpack it.
+ unsigned Count = ST->getNumElements();
+ if (Count == 1) {
+ V = IC.Builder->CreateExtractValue(V, 0);
+ combineStoreToNewValue(IC, SI, V);
+ return true;
+ }
+
+ // We don't want to break stores with padding here as we'd lose
+ // the knowledge that padding exists for the rest of the pipeline.
+ const DataLayout &DL = IC.getDataLayout();
+ auto *SL = DL.getStructLayout(ST);
+ if (SL->hasPadding())
+ return false;
+
+ SmallString<16> EltName = V->getName();
+ EltName += ".elt";
+ auto *Addr = SI.getPointerOperand();
+ SmallString<16> AddrName = Addr->getName();
+ AddrName += ".repack";
+ auto *IdxType = Type::getInt32Ty(ST->getContext());
+ auto *Zero = ConstantInt::get(IdxType, 0);
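+ // Emit a GEP, extractvalue and store for each field of the struct.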
+ for (unsigned i = 0; i < Count; i++) {
+ Value *Indices[2] = {
+ Zero,
+ ConstantInt::get(IdxType, i),
+ };
+ auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), AddrName);
+ auto *Val = IC.Builder->CreateExtractValue(V, i, EltName);
+ IC.Builder->CreateStore(Val, Ptr);
+ }
+
+ return true;
+ }
+
+ if (auto *AT = dyn_cast<ArrayType>(T)) {
+ // If the array has only one element, we unpack it.
+ if (AT->getNumElements() == 1) {
+ V = IC.Builder->CreateExtractValue(V, 0);
+ combineStoreToNewValue(IC, SI, V);
+ return true;
+ }
+ }
+
+ return false;
+}
+
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
return EraseInstFromFunction(SI);
// Attempt to improve the alignment.
- if (DL) {
- unsigned KnownAlign =
- getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
- DL, AT, &SI, DT);
- unsigned StoreAlign = SI.getAlignment();
- unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
- DL->getABITypeAlignment(Val->getType());
-
- if (KnownAlign > EffectiveStoreAlign)
- SI.setAlignment(KnownAlign);
- else if (StoreAlign == 0)
- SI.setAlignment(EffectiveStoreAlign);
+ unsigned KnownAlign = getOrEnforceKnownAlignment(
+ Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
+ unsigned StoreAlign = SI.getAlignment();
+ unsigned EffectiveStoreAlign =
+ StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
+
+ if (KnownAlign > EffectiveStoreAlign)
+ SI.setAlignment(KnownAlign);
+ else if (StoreAlign == 0)
+ SI.setAlignment(EffectiveStoreAlign);
+
+ // Try to canonicalize the stored type.
+ if (unpackStoreToAggregate(*this, SI))
+ return EraseInstFromFunction(SI);
+
+ // Replace GEP indices if possible.
+ if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {
+ Worklist.Add(NewGEPI);
+ return &SI;
}
- // Don't hack volatile/atomic stores.
- // FIXME: Some bits are legal for atomic stores; needs refactoring.
- if (!SI.isSimple()) return nullptr;
+ // Don't hack volatile/ordered stores.
+ // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
+ if (!SI.isUnordered()) return nullptr;
// If the RHS is an alloca with a single use, zapify the store, making the
// alloca dead.
// Do really simple DSE, to catch cases where there are several consecutive
// stores to the same location, separated by a few arithmetic operations. This
// situation often occurs with bitfield accesses.
- BasicBlock::iterator BBI = &SI;
+ BasicBlock::iterator BBI(SI);
for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
--ScanInsts) {
--BBI;
if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
// Prev store isn't volatile, and stores to the same location?
- if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
+ if (PrevSI->isUnordered() && equivalentAddressValues(PrevSI->getOperand(1),
SI.getOperand(1))) {
++NumDeadStore;
++BBI;
// the pointer we're loading and is producing the pointer we're storing,
// then *this* store is dead (X = load P; store X -> P).
if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
- if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
- LI->isSimple())
+ if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
+ assert(SI.isUnordered() && "can't eliminate ordering operation");
return EraseInstFromFunction(SI);
+ }
// Otherwise, this is a load from some other location. Stores before it
// may not be dead.
if (isa<UndefValue>(Val))
return EraseInstFromFunction(SI);
+ // The code below needs to be audited and adjusted for unordered atomics
+ if (!SI.isSimple())
+ return nullptr;
+
// If this store is the last instruction in the basic block (possibly
// excepting debug info instructions), and if the block ends with an
// unconditional branch, try to move it to the successor block.
- BBI = &SI;
+ BBI = SI.getIterator();
do {
++BBI;
} while (isa<DbgInfoIntrinsic>(BBI) ||
return false;
// Verify that the other block ends in a branch and is not otherwise empty.
- BasicBlock::iterator BBI = OtherBB->getTerminator();
+ BasicBlock::iterator BBI(OtherBB->getTerminator());
BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
if (!OtherBr || BBI == OtherBB->begin())
return false;