//
// The LLVM Compiler Infrastructure
//
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
Instruction *visitTrunc(TruncInst &CI);
Instruction *visitZExt(ZExtInst &CI);
Instruction *visitSExt(SExtInst &CI);
- Instruction *visitFPTrunc(CastInst &CI);
+ Instruction *visitFPTrunc(FPTruncInst &CI);
Instruction *visitFPExt(CastInst &CI);
Instruction *visitFPToUI(CastInst &CI);
Instruction *visitFPToSI(CastInst &CI);
Instruction *visitUIToFP(CastInst &CI);
Instruction *visitSIToFP(CastInst &CI);
Instruction *visitPtrToInt(CastInst &CI);
- Instruction *visitIntToPtr(CastInst &CI);
+ Instruction *visitIntToPtr(IntToPtrInst &CI);
Instruction *visitBitCast(BitCastInst &CI);
Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
Instruction *FI);
AddToWorkList(C);
return C;
}
+
+ Value *InsertBitCastBefore(Value *V, const Type *Ty, Instruction &Pos) {
+ return InsertCastBefore(Instruction::BitCast, V, Ty, Pos);
+ }
+
// ReplaceInstUsesWith - This method is to be used when an instruction is
// found to be dead, replaceable with another preexisting expression. Here
Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
Instruction *MatchBSwap(BinaryOperator &I);
bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
+ Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
+
Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);
};
return ReplaceInstUsesWith(I, NewPN);
}
+
+/// CannotBeNegativeZero - Return true if we can prove that the specified FP
+/// value is never equal to -0.0.
+///
+/// Note that this function will need to be revisited when we support nondefault
+/// rounding modes!
+///
+static bool CannotBeNegativeZero(const Value *V) {
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
+ return !CFP->getValueAPF().isNegZero();
+
+ // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
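+ // (Under the default round-to-nearest mode, -0.0 + +0.0 == +0.0, so
+ // x + 0.0 can never produce -0.0; this is the case the rounding-mode
+ // caveat above is about.)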
+ if (const Instruction *I = dyn_cast<Instruction>(V)) {
+ if (I->getOpcode() == Instruction::Add &&
+ isa<ConstantFP>(I->getOperand(1)) &&
+ cast<ConstantFP>(I->getOperand(1))->isNullValue())
+ return true;
+
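+ // sqrt returns -0.0 only for an input of -0.0 (IEEE 754), so sqrt(x)
+ // cannot be -0.0 unless x can be; recurse on the operand.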
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
+ if (II->getIntrinsicID() == Intrinsic::sqrt)
+ return CannotBeNegativeZero(II->getOperand(1));
+
+ if (const CallInst *CI = dyn_cast<CallInst>(I))
+ if (const Function *F = CI->getCalledFunction()) {
+ if (F->isDeclaration()) {
+ switch (F->getNameLen()) {
+ case 3: // abs(x) != -0.0
+ if (!strcmp(F->getNameStart(), "abs")) return true;
+ break;
+ case 4: // abs[lf](x) != -0.0
+ if (!strcmp(F->getNameStart(), "absf")) return true;
+ if (!strcmp(F->getNameStart(), "absl")) return true;
+ break;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
bool Changed = SimplifyCommutative(I);
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2)))
return R;
+ // W*X + Y*Z --> W * (X+Z) iff W == Y
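+ // e.g. A*B + A*C --> A*(B+C). The swaps below canonicalize the commuted
+ // forms (A*B + C*A, B*A + A*C, B*A + C*A) so the W == Y check catches them.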
+ if (I.getType()->isIntOrIntVector()) {
+ Value *W, *X, *Y, *Z;
+ if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
+ match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
+ if (W != Y) {
+ if (W == Z) {
+ std::swap(Y, Z);
+ } else if (Y == X) {
+ std::swap(W, X);
+ } else if (X == Z) {
+ std::swap(Y, Z);
+ std::swap(W, X);
+ }
+ }
+
+ if (W == Y) {
+ Value *NewAdd = InsertNewInstBefore(BinaryOperator::createAdd(X, Z,
+ LHS->getName()), I);
+ return BinaryOperator::createMul(W, NewAdd);
+ }
+ }
+ }
+
if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
Value *X = 0;
if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
}
// add (cast *A to intptrtype) B ->
- // cast (GEP (cast *A to sbyte*) B) ->
- // intptrtype
+ // cast (GEP (cast *A to sbyte*) B) --> intptrtype
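+ // (A GEP on i8* advances the pointer by exactly B bytes, matching the
+ // integer add; the address space of A is preserved below.)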
{
CastInst *CI = dyn_cast<CastInst>(LHS);
Value *Other = RHS;
(CI->getType()->getPrimitiveSizeInBits() ==
TD->getIntPtrType()->getPrimitiveSizeInBits())
&& isa<PointerType>(CI->getOperand(0)->getType())) {
- Value *I2 = InsertCastBefore(Instruction::BitCast, CI->getOperand(0),
- PointerType::get(Type::Int8Ty), I);
+ unsigned AS =
+ cast<PointerType>(CI->getOperand(0)->getType())->getAddressSpace();
+ Value *I2 = InsertBitCastBefore(CI->getOperand(0),
+ PointerType::get(Type::Int8Ty, AS), I);
I2 = InsertNewInstBefore(new GetElementPtrInst(I2, Other, "ctg2"), I);
return new PtrToIntInst(I2, CI->getType());
}
}
+
+ // add (select X 0 (sub n A)) A --> select X A n
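+ // (When X is true the sum is 0 + A == A; when false it is
+ // (n - A) + A == n.)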
+ {
+ SelectInst *SI = dyn_cast<SelectInst>(LHS);
+ Value *Other = RHS;
+ if (!SI) {
+ SI = dyn_cast<SelectInst>(RHS);
+ Other = LHS;
+ }
+ if (SI && SI->hasOneUse()) {
+ Value *TV = SI->getTrueValue();
+ Value *FV = SI->getFalseValue();
+ Value *A, *N;
+
+ // Can we fold the add into the argument of the select?
+ // We check both true and false select arguments for a matching subtract.
+ if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Value(A))) &&
+ A == Other) // Fold the add into the true select value.
+ return new SelectInst(SI->getCondition(), N, A);
+ if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Value(A))) &&
+ A == Other) // Fold the add into the false select value.
+ return new SelectInst(SI->getCondition(), A, N);
+ }
+ }
+
+ // Check for X+0.0. Simplify it to X if we know X is not -0.0.
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
+ if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
+ return ReplaceInstUsesWith(I, LHS);
return Changed ? &I : 0;
}
if (isa<UndefValue>(Op1))
return ReplaceInstUsesWith(I, Op1);
- // Handle cases involving: div X, (select Cond, Y, Z)
+ // Handle cases involving: [su]div X, (select Cond, Y, Z)
+ // This does not apply for fdiv.
if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
- // div X, (Cond ? 0 : Y) -> div X, Y. If the div and the select are in the
- // same basic block, then we replace the select with Y, and the condition
- // of the select with false (if the cond value is in the same BB). If the
- // select has uses other than the div, this allows them to be simplified
- // also. Note that div X, Y is just as good as div X, 0 (undef)
- if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
+ // [su]div X, (Cond ? 0 : Y) -> div X, Y. If the div and the select are in
+ // the same basic block, then we replace the select with Y, and the
+ // condition of the select with false (if the cond value is in the same BB).
+ // If the select has uses other than the div, this allows them to be
+ // simplified also. Note that div X, Y is just as good as div X, 0 (which
+ // is undef), so substituting Y loses nothing.
+ if (ConstantInt *ST = dyn_cast<ConstantInt>(SI->getOperand(1)))
if (ST->isNullValue()) {
Instruction *CondI = dyn_cast<Instruction>(SI->getOperand(0));
if (CondI && CondI->getParent() == I.getParent())
return &I;
}
- // Likewise for: div X, (Cond ? Y : 0) -> div X, Y
- if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
+ // Likewise for: [su]div X, (Cond ? Y : 0) -> div X, Y
+ if (ConstantInt *ST = dyn_cast<ConstantInt>(SI->getOperand(2)))
if (ST->isNullValue()) {
Instruction *CondI = dyn_cast<Instruction>(SI->getOperand(0));
if (CondI && CondI->getParent() == I.getParent())
ICmpInst::isSignedPredicate(LHSCC) ==
ICmpInst::isSignedPredicate(RHSCC))) {
// Ensure that the larger constant is on the RHS.
- ICmpInst::Predicate GT = ICmpInst::isSignedPredicate(LHSCC) ?
- ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
+ ICmpInst::Predicate GT;
+ if (ICmpInst::isSignedPredicate(LHSCC) ||
+ (ICmpInst::isEquality(LHSCC) &&
+ ICmpInst::isSignedPredicate(RHSCC)))
+ GT = ICmpInst::ICMP_SGT;
+ else
+ GT = ICmpInst::ICMP_UGT;
+
Constant *Cmp = ConstantExpr::getICmp(GT, LHSCst, RHSCst);
ICmpInst *LHS = cast<ICmpInst>(Op0);
if (cast<ConstantInt>(Cmp)->getZExtValue()) {
if (isa<UndefValue>(Op1)) // X icmp undef -> undef
return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty));
-
+
// icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
// addresses never equal each other! We already know that Op0 != Op1.
if ((isa<GlobalValue>(Op0) || isa<AllocaInst>(Op0) ||
// See if we are doing a comparison between a constant and an instruction that
// can be folded into the comparison.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
+ Value *A, *B;
+
+ // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
+ if (I.isEquality() && CI->isNullValue() &&
+ match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
+ // A - B == 0 iff A == B, so for equality predicates compare the operands.
+ return new ICmpInst(I.getPredicate(), A, B);
+ }
+
switch (I.getPredicate()) {
default: break;
case ICmpInst::ICMP_ULT: // A <u MIN -> FALSE
Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
} else {
// Otherwise, cast the RHS right before the icmp
- Op1 = InsertCastBefore(Instruction::BitCast, Op1, Op0->getType(), I);
+ Op1 = InsertBitCastBefore(Op1, Op0->getType(), I);
}
return new ICmpInst(I.getPredicate(), Op0, Op1);
}
RHSOp = RHSC->getOperand(0);
// If the pointer types don't match, insert a bitcast.
if (LHSCIOp->getType() != RHSOp->getType())
- RHSOp = InsertCastBefore(Instruction::BitCast, RHSOp,
- LHSCIOp->getType(), ICI);
+ RHSOp = InsertBitCastBefore(RHSOp, LHSCIOp->getType(), ICI);
}
if (RHSOp)
if (RHSCIOp->getType() != LHSCIOp->getType())
return 0;
- // If the signedness of the two compares doesn't agree (i.e. one is a sext
+ // If the signedness of the two casts doesn't agree (i.e. one is a sext
// and the other is a zext), then we can't handle this.
if (CI->getOpcode() != LHSCI->getOpcode())
return 0;
- // Likewise, if the signedness of the [sz]exts and the compare don't match,
- // then we can't handle this.
- if (isSignedExt != isSignedCmp && !ICI.isEquality())
- return 0;
-
- // Okay, just insert a compare of the reduced operands now!
- return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
+ // Deal with equality cases early.
+ if (ICI.isEquality())
+ return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
+
+ // A signed comparison of sign extended values simplifies into a
+ // signed comparison.
+ if (isSignedCmp && isSignedExt)
+ return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
+
+ // The other three cases all fold into an unsigned comparison.
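+ // (zext operands are known non-negative, so signed and unsigned compares
+ // of them agree; and sext preserves unsigned ordering, so in every case
+ // the narrow operands can be compared unsigned.)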
+ return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
}
// If we aren't dealing with a constant on the RHS, exit early
if (Instruction *NV = FoldOpIntoPhi(I))
return NV;
+ // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
+ if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
+ Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
+ // If 'shift2' is an ashr, we would have to get the sign bit into a funny
+ // place. Don't try to do this transformation in this case. Also, we
+ // require that the input operand is a shift-by-constant so that we have
+ // confidence that the shifts will get folded together. We could do this
+ // xform in more cases, but it is unlikely to be profitable.
+ if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
+ isa<ConstantInt>(TrOp->getOperand(1))) {
+ // Okay, we'll do this xform. Make the shift of shift.
+ Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
+ Instruction *NSh = BinaryOperator::create(I.getOpcode(), TrOp, ShAmt,
+ I.getName());
+ InsertNewInstBefore(NSh, I); // shift2(shift1(x, c1), c2)
+
+ // For logical shifts, the truncation has the effect of making the high
+ // part of the register be zeros. Emulate this by inserting an AND to
+ // clear the top bits as needed. This 'and' will usually be zapped by
+ // other xforms later if dead.
+ unsigned SrcSize = TrOp->getType()->getPrimitiveSizeInBits();
+ unsigned DstSize = TI->getType()->getPrimitiveSizeInBits();
+ APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
+
+ // The mask we constructed says what the trunc would do if it occurred
+ // between the shifts. We want to know the effect *after* the second
+ // shift. We know that it is a logical shift by a constant, so adjust the
+ // mask as appropriate.
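+ // e.g. for a trunc from i32 to i16 followed by shl 4, the mask starts
+ // as 0x0000FFFF and becomes 0x000FFFF0 after the shl adjustment.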
+ if (I.getOpcode() == Instruction::Shl)
+ MaskV <<= Op1->getZExtValue();
+ else {
+ assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
+ MaskV = MaskV.lshr(Op1->getZExtValue());
+ }
+
+ Instruction *And = BinaryOperator::createAnd(NSh, ConstantInt::get(MaskV),
+ TI->getName());
+ InsertNewInstBefore(And, I); // shift2(shift1(x, c1), c2) & mask
+
+ // Return the value truncated to the interesting size.
+ return new TruncInst(And, I.getType());
+ }
+ }
+
if (Op0->hasOneUse()) {
if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
// Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
NumCastsRemoved);
+ case Instruction::Mul:
+ // A multiply can be truncated by truncating its operands.
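+ // (The low N bits of a product depend only on the low N bits of the
+ // factors, so truncation commutes with the multiply.)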
+ return Ty->getBitWidth() < OrigTy->getBitWidth() &&
+ CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
+ NumCastsRemoved) &&
+ CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
+ NumCastsRemoved);
+
case Instruction::Shl:
// If we are truncating the result of this SHL, and if it's a shift of a
// constant amount, we can always perform a SHL in a smaller type.
switch (I->getOpcode()) {
case Instruction::Add:
case Instruction::Sub:
+ case Instruction::Mul:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
return 0;
}
-Instruction *InstCombiner::visitFPTrunc(CastInst &CI) {
- return commonCastTransforms(CI);
+/// FitsInFPType - Return a Constant* for the specified FP constant if it fits
+/// in the specified FP type without changing its value.
+static Constant *FitsInFPType(ConstantFP *CFP, const Type *FPTy,
+ const fltSemantics &Sem) {
+ APFloat F = CFP->getValueAPF();
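+ // Note: convert returns opOK only when the value is represented exactly
+ // in the new semantics; any rounding yields opInexact instead.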
+ if (F.convert(Sem, APFloat::rmNearestTiesToEven) == APFloat::opOK)
+ return ConstantFP::get(FPTy, F);
+ return 0;
+}
+
+/// LookThroughFPExtensions - If this is an fp extension instruction, look
+/// through it until we get the source value.
+static Value *LookThroughFPExtensions(Value *V) {
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ if (I->getOpcode() == Instruction::FPExt)
+ return LookThroughFPExtensions(I->getOperand(0));
+
+ // If this value is a constant, return the constant in the smallest FP type
+ // that can accurately represent it. This allows us to turn
+ // (float)((double)X+2.0) into X+2.0f.
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
+ if (CFP->getType() == Type::PPC_FP128Ty)
+ return V; // No constant folding of this.
+ // See if the value can be truncated to float and then reextended.
+ if (Value *V = FitsInFPType(CFP, Type::FloatTy, APFloat::IEEEsingle))
+ return V;
+ if (CFP->getType() == Type::DoubleTy)
+ return V; // Won't shrink.
+ if (Value *V = FitsInFPType(CFP, Type::DoubleTy, APFloat::IEEEdouble))
+ return V;
+ // Don't try to shrink to various long double types.
+ }
+
+ return V;
+}
+
+Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
+ if (Instruction *I = commonCastTransforms(CI))
+ return I;
+
+ // If we have fptrunc(add (fpext x), (fpext y)), where x and y are no
+ // larger than the destination type, we can eliminate the truncate by doing
+ // the add in the smaller type. This applies to add/sub/mul/div as well as
+ // many builtins (sqrt, etc).
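+ // e.g. (float)((double)x + (double)y) --> x + y when x and y are floats.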
+ BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
+ if (OpI && OpI->hasOneUse()) {
+ switch (OpI->getOpcode()) {
+ default: break;
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ const Type *SrcTy = OpI->getType();
+ Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
+ Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
+ if (LHSTrunc->getType() != SrcTy &&
+ RHSTrunc->getType() != SrcTy) {
+ unsigned DstSize = CI.getType()->getPrimitiveSizeInBits();
+ // If the source types were both smaller than the destination type of
+ // the cast, do this xform.
+ if (LHSTrunc->getType()->getPrimitiveSizeInBits() <= DstSize &&
+ RHSTrunc->getType()->getPrimitiveSizeInBits() <= DstSize) {
+ LHSTrunc = InsertCastBefore(Instruction::FPExt, LHSTrunc,
+ CI.getType(), CI);
+ RHSTrunc = InsertCastBefore(Instruction::FPExt, RHSTrunc,
+ CI.getType(), CI);
+ return BinaryOperator::create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
+ }
+ }
+ break;
+ }
+ }
+ return 0;
}
Instruction *InstCombiner::visitFPExt(CastInst &CI) {
return commonPointerCastTransforms(CI);
}
-Instruction *InstCombiner::visitIntToPtr(CastInst &CI) {
- return commonCastTransforms(CI);
+Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
+ if (Instruction *I = commonCastTransforms(CI))
+ return I;
+
+ const Type *DestPointee = cast<PointerType>(CI.getType())->getElementType();
+ if (!DestPointee->isSized()) return 0;
+
+ // If this is inttoptr(add (ptrtoint x), cst), try to turn this into a GEP.
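+ // e.g. inttoptr (add (ptrtoint i64* %X), 16) with an i64* destination
+ // becomes a single GEP with index 2, since 16 bytes is two i64 elements.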
+ ConstantInt *Cst;
+ Value *X;
+ if (match(CI.getOperand(0), m_Add(m_Cast<PtrToIntInst>(m_Value(X)),
+ m_ConstantInt(Cst)))) {
+ // If X, the ptrtoint source, has the same pointer type as the inttoptr
+ // result, see if the add can become a single-index GEP on X.
+ if (X->getType() == CI.getType()) {
+ // Get the size of the pointee type in bytes.
+ uint64_t Size = TD->getABITypeSize(DestPointee);
+
+ // Convert the constant to intptr type.
+ APInt Offset = Cst->getValue();
+ Offset.sextOrTrunc(TD->getPointerSizeInBits());
+
+ // If Offset is evenly divisible by Size, we can do this xform.
+ if (Size && !APIntOps::srem(Offset, APInt(Offset.getBitWidth(), Size))){
+ Offset = APIntOps::sdiv(Offset, APInt(Offset.getBitWidth(), Size));
+ return new GetElementPtrInst(X, ConstantInt::get(Offset));
+ }
+ }
+ // TODO: Could handle other cases, e.g. where the add is indexing into a
+ // field of a struct.
+ } else if (CI.getOperand(0)->hasOneUse() &&
+ match(CI.getOperand(0), m_Add(m_Value(X), m_ConstantInt(Cst)))) {
+ // Otherwise, if this is inttoptr(add x, cst), try to turn this into an
+ // "inttoptr+GEP" instead of "add+intptr".
+
+ // Get the size of the pointee type.
+ uint64_t Size = TD->getABITypeSize(DestPointee);
+
+ // Convert the constant to intptr type.
+ APInt Offset = Cst->getValue();
+ Offset.sextOrTrunc(TD->getPointerSizeInBits());
+
+ // If Offset is evenly divisible by Size, we can do this xform.
+ if (Size && !APIntOps::srem(Offset, APInt(Offset.getBitWidth(), Size))){
+ Offset = APIntOps::sdiv(Offset, APInt(Offset.getBitWidth(), Size));
+
+ Instruction *P = InsertNewInstBefore(new IntToPtrInst(X, CI.getType(),
+ "tmp"), CI);
+ return new GetElementPtrInst(P, ConstantInt::get(Offset), "tmp");
+ }
+ }
+ return 0;
}
Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
return 0;
}
+Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
+ unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1), TD);
+ unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2), TD);
+ unsigned MinAlign = std::min(DstAlign, SrcAlign);
+ unsigned CopyAlign = MI->getAlignment()->getZExtValue();
+
+ if (CopyAlign < MinAlign) {
+ MI->setAlignment(ConstantInt::get(Type::Int32Ty, MinAlign));
+ return MI;
+ }
+
+ // If the memcpy/memmove length is 1/2/4/8 bytes, replace the call with a
+ // load/store pair.
+ ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
+ if (MemOpLength == 0) return 0;
+
+ // Source and destination pointer types are always "i8*" for the intrinsic.
+ // See if the size is something we can handle with a single primitive
+ // load/store.
+ // A single load+store correctly handles overlapping memory in the memmove
+ // case.
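+ // (The full source value is loaded before the store executes, so partial
+ // overlap between source and destination cannot clobber the data.)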
+ unsigned Size = MemOpLength->getZExtValue();
+ if (Size == 0 || Size > 8 || (Size&(Size-1)))
+ return 0; // If not 1/2/4/8 bytes, exit.
+
+ // Use an integer load+store unless we can find something better.
+ Type *NewPtrTy = PointerType::getUnqual(IntegerType::get(Size<<3));
+
+ // Memcpy forces the use of i8* for the source and destination. That means
+ // that if you're using memcpy to move one double around, you'll get a cast
+ // from double* to i8*. We'd much rather use a double load+store than an
+ // i64 load+store here, because that improves the odds that the source or
+ // dest address will be promotable. See if we can find a better type than
+ // the integer datatype.
+ if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
+ const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
+ if (SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
+ // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
+ // down through these levels if so.
+ while (!SrcETy->isFirstClassType()) {
+ if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
+ if (STy->getNumElements() == 1)
+ SrcETy = STy->getElementType(0);
+ else
+ break;
+ } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
+ if (ATy->getNumElements() == 1)
+ SrcETy = ATy->getElementType();
+ else
+ break;
+ } else
+ break;
+ }
+
+ if (SrcETy->isFirstClassType())
+ NewPtrTy = PointerType::getUnqual(SrcETy);
+ }
+ }
+
+ // If the memcpy/memmove provides better alignment info than we can
+ // infer, use it.
+ SrcAlign = std::max(SrcAlign, CopyAlign);
+ DstAlign = std::max(DstAlign, CopyAlign);
+
+ Value *Src = InsertBitCastBefore(MI->getOperand(2), NewPtrTy, *MI);
+ Value *Dest = InsertBitCastBefore(MI->getOperand(1), NewPtrTy, *MI);
+ Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
+ InsertNewInstBefore(L, *MI);
+ InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
+
+ // Set the size of the copy to 0; the call will be deleted on the next
+ // iteration.
+ MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
+ return MI;
+}
/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
// If we have a memmove and the source operation is a constant global,
// then the source and dest pointers can't alias, so we can change this
// into a call to memcpy.
- if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(II)) {
+ if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
if (GVSrc->isConstant()) {
Module *M = CI.getParent()->getParent()->getParent();
- const char *Name;
- if (CI.getCalledFunction()->getFunctionType()->getParamType(2) ==
- Type::Int32Ty)
- Name = "llvm.memcpy.i32";
+ Intrinsic::ID MemCpyID;
+ if (CI.getOperand(3)->getType() == Type::Int32Ty)
+ MemCpyID = Intrinsic::memcpy_i32;
else
- Name = "llvm.memcpy.i64";
- Constant *MemCpy = M->getOrInsertFunction(Name,
- CI.getCalledFunction()->getFunctionType());
- CI.setOperand(0, MemCpy);
+ MemCpyID = Intrinsic::memcpy_i64;
+ CI.setOperand(0, Intrinsic::getDeclaration(M, MemCpyID));
Changed = true;
}
}
// If we can determine a pointer alignment that is bigger than currently
// set, update the alignment.
if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
- unsigned Alignment1 = GetOrEnforceKnownAlignment(MI->getOperand(1), TD);
- unsigned Alignment2 = GetOrEnforceKnownAlignment(MI->getOperand(2), TD);
- unsigned Align = std::min(Alignment1, Alignment2);
- if (MI->getAlignment()->getZExtValue() < Align) {
- MI->setAlignment(ConstantInt::get(Type::Int32Ty, Align));
- Changed = true;
- }
-
- // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
- // load/store.
- ConstantInt *MemOpLength = dyn_cast<ConstantInt>(CI.getOperand(3));
- if (MemOpLength) {
- unsigned Size = MemOpLength->getZExtValue();
- unsigned Align = cast<ConstantInt>(CI.getOperand(4))->getZExtValue();
- PointerType *NewPtrTy = NULL;
- // Destination pointer type is always i8 *
- // If Size is 8 then use Int64Ty
- // If Size is 4 then use Int32Ty
- // If Size is 2 then use Int16Ty
- // If Size is 1 then use Int8Ty
- if (Size && Size <=8 && !(Size&(Size-1)))
- NewPtrTy = PointerType::get(IntegerType::get(Size<<3));
-
- if (NewPtrTy) {
- Value *Src = InsertCastBefore(Instruction::BitCast, CI.getOperand(2),
- NewPtrTy, CI);
- Value *Dest = InsertCastBefore(Instruction::BitCast, CI.getOperand(1),
- NewPtrTy, CI);
- Value *L = new LoadInst(Src, "tmp", false, Align, &CI);
- Value *NS = new StoreInst(L, Dest, false, Align, &CI);
- CI.replaceAllUsesWith(NS);
- Changed = true;
- return EraseInstFromFunction(CI);
- }
- }
+ if (Instruction *I = SimplifyMemTransfer(MI))
+ return I;
} else if (isa<MemSetInst>(MI)) {
unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest(), TD);
if (MI->getAlignment()->getZExtValue() < Alignment) {
// Turn PPC lvx -> load if the pointer is known aligned.
// Turn X86 loadups -> load if the pointer is known aligned.
if (GetOrEnforceKnownAlignment(II->getOperand(1), TD, 16) >= 16) {
- Value *Ptr = InsertCastBefore(Instruction::BitCast, II->getOperand(1),
- PointerType::get(II->getType()), CI);
+ Value *Ptr = InsertBitCastBefore(II->getOperand(1),
+ PointerType::getUnqual(II->getType()),
+ CI);
return new LoadInst(Ptr);
}
break;
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
if (GetOrEnforceKnownAlignment(II->getOperand(2), TD, 16) >= 16) {
- const Type *OpPtrTy = PointerType::get(II->getOperand(1)->getType());
- Value *Ptr = InsertCastBefore(Instruction::BitCast, II->getOperand(2),
- OpPtrTy, CI);
+ const Type *OpPtrTy =
+ PointerType::getUnqual(II->getOperand(1)->getType());
+ Value *Ptr = InsertBitCastBefore(II->getOperand(2), OpPtrTy, CI);
return new StoreInst(II->getOperand(1), Ptr);
}
break;
case Intrinsic::x86_sse2_storel_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
if (GetOrEnforceKnownAlignment(II->getOperand(1), TD, 16) >= 16) {
- const Type *OpPtrTy = PointerType::get(II->getOperand(2)->getType());
- Value *Ptr = InsertCastBefore(Instruction::BitCast, II->getOperand(1),
- OpPtrTy, CI);
+ const Type *OpPtrTy =
+ PointerType::getUnqual(II->getOperand(2)->getType());
+ Value *Ptr = InsertBitCastBefore(II->getOperand(1), OpPtrTy, CI);
return new StoreInst(II->getOperand(2), Ptr);
}
break;
if (AllEltsOk) {
// Cast the input vectors to byte vectors.
- Value *Op0 = InsertCastBefore(Instruction::BitCast,
- II->getOperand(1), Mask->getType(), CI);
- Value *Op1 = InsertCastBefore(Instruction::BitCast,
- II->getOperand(2), Mask->getType(), CI);
+ Value *Op0 = InsertBitCastBefore(II->getOperand(1), Mask->getType(), CI);
+ Value *Op1 = InsertBitCastBefore(II->getOperand(2), Mask->getType(), CI);
Value *Result = UndefValue::get(Op0->getType());
// Only extract each element once.
// If the call and callee calling conventions don't match, this call must
// be unreachable, as the call is undefined.
new StoreInst(ConstantInt::getTrue(),
- UndefValue::get(PointerType::get(Type::Int1Ty)), OldCall);
+ UndefValue::get(PointerType::getUnqual(Type::Int1Ty)),
+ OldCall);
if (!OldCall->use_empty())
OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
if (isa<CallInst>(OldCall)) // Not worth removing an invoke here.
// undef so that we know that this code is not reachable, despite the fact
// that we can't modify the CFG here.
new StoreInst(ConstantInt::getTrue(),
- UndefValue::get(PointerType::get(Type::Int1Ty)),
+ UndefValue::get(PointerType::getUnqual(Type::Int1Ty)),
CS.getInstruction());
if (!CS.getInstruction()->use_empty())
}
}
- if (isa<InlineAsm>(Callee) && !CS.isNoUnwind()) {
+ if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
// Inline asm calls cannot throw - mark them 'nounwind'.
- const ParamAttrsList *PAL = CS.getParamAttrs();
- uint16_t RAttributes = PAL ? PAL->getParamAttrs(0) : 0;
- RAttributes |= ParamAttr::NoUnwind;
-
- ParamAttrsVector modVec;
- modVec.push_back(ParamAttrsWithIndex::get(0, RAttributes));
- PAL = ParamAttrsList::getModified(PAL, modVec);
- CS.setParamAttrs(PAL);
+ CS.setDoesNotThrow();
Changed = true;
}
return false;
Function *Callee = cast<Function>(CE->getOperand(0));
Instruction *Caller = CS.getInstruction();
+ const ParamAttrsList* CallerPAL = CS.getParamAttrs();
// Okay, this is a cast from a function to a different type. Unless doing so
// would cause a type conversion of one of our arguments, change this call to
const FunctionType *FT = Callee->getFunctionType();
const Type *OldRetTy = Caller->getType();
- const ParamAttrsList* CallerPAL = 0;
- if (CallInst *CallerCI = dyn_cast<CallInst>(Caller))
- CallerPAL = CallerCI->getParamAttrs();
- else if (InvokeInst *CallerII = dyn_cast<InvokeInst>(Caller))
- CallerPAL = CallerII->getParamAttrs();
-
- // If the parameter attributes are not compatible, don't do the xform. We
- // don't want to lose an sret attribute or something.
- if (!ParamAttrsList::areCompatible(CallerPAL, Callee->getParamAttrs()))
- return false;
-
// Check to see if we are changing the return type...
if (OldRetTy != FT->getReturnType()) {
if (Callee->isDeclaration() && !Caller->use_empty() &&
TD->getIntPtrType() == OldRetTy))
return false; // Cannot transform this return value.
+ if (!Caller->use_empty() &&
+ // void -> non-void is handled specially
+ FT->getReturnType() != Type::VoidTy &&
+ !CastInst::isCastable(FT->getReturnType(), OldRetTy))
+ return false; // Cannot transform this return value.
+
+ if (CallerPAL && !Caller->use_empty()) {
+ uint16_t RAttrs = CallerPAL->getParamAttrs(0);
+ if (RAttrs & ParamAttr::typeIncompatible(FT->getReturnType()))
+ return false; // Attribute not compatible with transformed value.
+ }
+
// If the callsite is an invoke instruction, and the return value is used by
// a PHI node in a successor, we cannot change the return type of the call
// because there is no place to put the cast instruction (without breaking
for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
const Type *ParamTy = FT->getParamType(i);
const Type *ActTy = (*AI)->getType();
+
+ if (!CastInst::isCastable(ActTy, ParamTy))
+ return false; // Cannot transform this parameter value.
+
+ if (CallerPAL) {
+ uint16_t PAttrs = CallerPAL->getParamAttrs(i + 1);
+ if (PAttrs & ParamAttr::typeIncompatible(ParamTy))
+ return false; // Attribute not compatible with transformed value.
+ }
+
ConstantInt *c = dyn_cast<ConstantInt>(*AI);
- //Some conversions are safe even if we do not have a body.
- //Either we can cast directly, or we can upconvert the argument
+ // Some conversions are safe even if we do not have a body.
+ // Either we can cast directly, or we can upconvert the argument
bool isConvertible = ActTy == ParamTy ||
(isa<PointerType>(ParamTy) && isa<PointerType>(ActTy)) ||
(ParamTy->isInteger() && ActTy->isInteger() &&
(c && ParamTy->getPrimitiveSizeInBits() >= ActTy->getPrimitiveSizeInBits()
&& c->getValue().isStrictlyPositive());
if (Callee->isDeclaration() && !isConvertible) return false;
-
- // Most other conversions can be done if we have a body, even if these
- // lose information, e.g. int->short.
- // Some conversions cannot be done at all, e.g. float to pointer.
- // Logic here parallels CastInst::getCastOpcode (the design there
- // requires legality checks like this be done before calling it).
- if (ParamTy->isInteger()) {
- if (const VectorType *VActTy = dyn_cast<VectorType>(ActTy)) {
- if (VActTy->getBitWidth() != ParamTy->getPrimitiveSizeInBits())
- return false;
- }
- if (!ActTy->isInteger() && !ActTy->isFloatingPoint() &&
- !isa<PointerType>(ActTy))
- return false;
- } else if (ParamTy->isFloatingPoint()) {
- if (const VectorType *VActTy = dyn_cast<VectorType>(ActTy)) {
- if (VActTy->getBitWidth() != ParamTy->getPrimitiveSizeInBits())
- return false;
- }
- if (!ActTy->isInteger() && !ActTy->isFloatingPoint())
- return false;
- } else if (const VectorType *VParamTy = dyn_cast<VectorType>(ParamTy)) {
- if (const VectorType *VActTy = dyn_cast<VectorType>(ActTy)) {
- if (VActTy->getBitWidth() != VParamTy->getBitWidth())
- return false;
- }
- if (VParamTy->getBitWidth() != ActTy->getPrimitiveSizeInBits())
- return false;
- } else if (isa<PointerType>(ParamTy)) {
- if (!ActTy->isInteger() && !isa<PointerType>(ActTy))
- return false;
- } else {
- return false;
- }
}
if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
Callee->isDeclaration())
return false; // Do not delete arguments unless we have a function body...
+ if (FT->getNumParams() < NumActualArgs && FT->isVarArg() && CallerPAL)
+ // In this case we have more arguments than the new function type, but we
+ // won't be dropping them. Check that these extra arguments have attributes
+ // that are compatible with being a vararg call argument.
+ for (unsigned i = CallerPAL->size(); i; --i) {
+ if (CallerPAL->getParamIndex(i - 1) <= FT->getNumParams())
+ break;
+ uint16_t PAttrs = CallerPAL->getParamAttrsAtIndex(i - 1);
+ if (PAttrs & ParamAttr::VarArgsIncompatible)
+ return false;
+ }
+
// Okay, we decided that this is a safe thing to do: go ahead and start
// inserting cast instructions as necessary...
std::vector<Value*> Args;
Args.reserve(NumActualArgs);
+ ParamAttrsVector attrVec;
+ attrVec.reserve(NumCommonArgs);
+
+ // Get any return attributes.
+ uint16_t RAttrs = CallerPAL ? CallerPAL->getParamAttrs(0) : 0;
+
+ // If the return value is not being used, the type may not be compatible
+ // with the existing attributes. Wipe out any problematic attributes.
+ RAttrs &= ~ParamAttr::typeIncompatible(FT->getReturnType());
+
+ // Add the new return attributes.
+ if (RAttrs)
+ attrVec.push_back(ParamAttrsWithIndex::get(0, RAttrs));
AI = CS.arg_begin();
for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
CastInst *NewCast = CastInst::create(opcode, *AI, ParamTy, "tmp");
Args.push_back(InsertNewInstBefore(NewCast, *Caller));
}
+
+ // Add any parameter attributes.
+ uint16_t PAttrs = CallerPAL ? CallerPAL->getParamAttrs(i + 1) : 0;
+ if (PAttrs)
+ attrVec.push_back(ParamAttrsWithIndex::get(i + 1, PAttrs));
}
// If the function takes more arguments than the call was taking, add them
} else {
Args.push_back(*AI);
}
+
+ // Add any parameter attributes.
+ uint16_t PAttrs = CallerPAL ? CallerPAL->getParamAttrs(i + 1) : 0;
+ if (PAttrs)
+ attrVec.push_back(ParamAttrsWithIndex::get(i + 1, PAttrs));
}
}
if (FT->getReturnType() == Type::VoidTy)
Caller->setName(""); // Void type should not have a name.
+ const ParamAttrsList* NewCallerPAL = ParamAttrsList::get(attrVec);
+
Instruction *NC;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
NC = new InvokeInst(Callee, II->getNormalDest(), II->getUnwindDest(),
Args.begin(), Args.end(), Caller->getName(), Caller);
cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
- cast<InvokeInst>(NC)->setParamAttrs(CallerPAL);
+ cast<InvokeInst>(NC)->setParamAttrs(NewCallerPAL);
} else {
NC = new CallInst(Callee, Args.begin(), Args.end(),
Caller->getName(), Caller);
if (CI->isTailCall())
cast<CallInst>(NC)->setTailCall();
cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
- cast<CallInst>(NC)->setParamAttrs(CallerPAL);
+ cast<CallInst>(NC)->setParamAttrs(NewCallerPAL);
}
// Insert a cast of the return type as necessary.
Value *NV = NC;
- if (Caller->getType() != NV->getType() && !Caller->use_empty()) {
+ if (OldRetTy != NV->getType() && !Caller->use_empty()) {
if (NV->getType() != Type::VoidTy) {
- const Type *CallerTy = Caller->getType();
Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
- CallerTy, false);
- NV = NC = CastInst::create(opcode, NC, CallerTy, "tmp");
+ OldRetTy, false);
+ NV = NC = CastInst::create(opcode, NC, OldRetTy, "tmp");
// If this is an invoke instruction, we should insert it after the first
// non-phi, instruction in the normal successor block.
Value *Callee = CS.getCalledValue();
const PointerType *PTy = cast<PointerType>(Callee->getType());
const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ const ParamAttrsList *Attrs = CS.getParamAttrs();
+
+ // If the call already has the 'nest' attribute somewhere, give up;
+ // otherwise 'nest' would occur twice after splicing in the chain.
+ if (Attrs && Attrs->hasAttrSomewhere(ParamAttr::Nest))
+ return 0;
IntrinsicInst *Tramp =
cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
std::vector<Value*> NewArgs;
NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
+ ParamAttrsVector NewAttrs;
+ NewAttrs.reserve(Attrs ? Attrs->size() + 1 : 1);
+
// Insert the nest argument into the call argument list, which may
- // mean appending it.
+ // mean appending it. Likewise for attributes.
+
+ // Add any function result attributes.
+ uint16_t Attr = Attrs ? Attrs->getParamAttrs(0) : 0;
+ if (Attr)
+ NewAttrs.push_back (ParamAttrsWithIndex::get(0, Attr));
+
{
unsigned Idx = 1;
CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
do {
if (Idx == NestIdx) {
- // Add the chain argument.
+ // Add the chain argument and attributes.
Value *NestVal = Tramp->getOperand(3);
if (NestVal->getType() != NestTy)
NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
NewArgs.push_back(NestVal);
+ NewAttrs.push_back(ParamAttrsWithIndex::get(NestIdx, NestAttr));
}
if (I == E)
break;
- // Add the original argument.
+ // Add the original argument and attributes.
NewArgs.push_back(*I);
+ Attr = Attrs ? Attrs->getParamAttrs(Idx) : 0;
+ if (Attr)
+ NewAttrs.push_back
+ (ParamAttrsWithIndex::get(Idx + (Idx >= NestIdx), Attr));
++Idx, ++I;
} while (1);
// The trampoline may have been bitcast to a bogus type (FTy).
// Handle this by synthesizing a new function type, equal to FTy
- // with the chain parameter inserted. Likewise for attributes.
+ // with the chain parameter inserted.
- const ParamAttrsList *Attrs = CS.getParamAttrs();
std::vector<const Type*> NewTypes;
- ParamAttrsVector NewAttrs;
NewTypes.reserve(FTy->getNumParams()+1);
- // Add any function result attributes.
- uint16_t Attr = Attrs ? Attrs->getParamAttrs(0) : 0;
- if (Attr)
- NewAttrs.push_back (ParamAttrsWithIndex::get(0, Attr));
-
// Insert the chain's type into the list of parameter types, which may
- // mean appending it. Likewise for the chain's attributes.
+ // mean appending it.
{
unsigned Idx = 1;
FunctionType::param_iterator I = FTy->param_begin(),
E = FTy->param_end();
do {
- if (Idx == NestIdx) {
- // Add the chain's type and attributes.
+ if (Idx == NestIdx)
+ // Add the chain's type.
NewTypes.push_back(NestTy);
- NewAttrs.push_back(ParamAttrsWithIndex::get(NestIdx, NestAttr));
- }
if (I == E)
break;
- // Add the original type and attributes.
+ // Add the original type.
NewTypes.push_back(*I);
- Attr = Attrs ? Attrs->getParamAttrs(Idx) : 0;
- if (Attr)
- NewAttrs.push_back
- (ParamAttrsWithIndex::get(Idx + (Idx >= NestIdx), Attr));
++Idx, ++I;
} while (1);
// code sort out any function type mismatches.
FunctionType *NewFTy =
FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg());
- Constant *NewCallee = NestF->getType() == PointerType::get(NewFTy) ?
- NestF : ConstantExpr::getBitCast(NestF, PointerType::get(NewFTy));
+ Constant *NewCallee = NestF->getType() == PointerType::getUnqual(NewFTy) ?
+ NestF : ConstantExpr::getBitCast(NestF, PointerType::getUnqual(NewFTy));
const ParamAttrsList *NewPAL = ParamAttrsList::get(NewAttrs);
Instruction *NewCaller;
if (isa<UndefValue>(Op)) {
// Insert a new store to null because we cannot modify the CFG here.
new StoreInst(ConstantInt::getTrue(),
- UndefValue::get(PointerType::get(Type::Int1Ty)), &FI);
+ UndefValue::get(PointerType::getUnqual(Type::Int1Ty)), &FI);
return EraseInstFromFunction(FI);
}
return ReplaceInstUsesWith(LI, LIB);
}
- if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op))
- if (isa<ConstantPointerNull>(GEPI->getOperand(0))) {
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
+ const Value *GEPI0 = GEPI->getOperand(0);
+ // TODO: Consider a target hook for valid address spaces for this xform.
+ if (isa<ConstantPointerNull>(GEPI0) &&
+ cast<PointerType>(GEPI0->getType())->getAddressSpace() == 0) {
// Insert a new store to null instruction before the load to indicate
// that this code is not reachable. We do this instead of inserting
// an unreachable instruction directly because we cannot modify the
Constant::getNullValue(Op->getType()), &LI);
return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
}
+ }
if (Constant *C = dyn_cast<Constant>(Op)) {
// load null/undef -> undef
- if ((C->isNullValue() || isa<UndefValue>(C))) {
+ // TODO: Consider a target hook for valid address spaces for this xform.
+ if (isa<UndefValue>(C) || (C->isNullValue() &&
+ cast<PointerType>(Op->getType())->getAddressSpace() == 0)) {
// Insert a new store to null instruction before the load to indicate that
// this code is not reachable. We do this instead of inserting an
// unreachable instruction directly because we cannot modify the CFG.
return BinaryOperator::create(BO->getOpcode(), newEI0, newEI1);
}
} else if (isa<LoadInst>(I)) {
- Value *Ptr = InsertCastBefore(Instruction::BitCast, I->getOperand(0),
- PointerType::get(EI.getType()), EI);
+ unsigned AS =
+ cast<PointerType>(I->getOperand(0)->getType())->getAddressSpace();
+ Value *Ptr = InsertBitCastBefore(I->getOperand(0),
+ PointerType::get(EI.getType(), AS),EI);
GetElementPtrInst *GEP =
new GetElementPtrInst(Ptr, EI.getOperand(1), I->getName() + ".gep");
InsertNewInstBefore(GEP, EI);