// an i64 load+store, here because this improves the odds that the source or
// dest address will be promotable. See if we can find a better type than the
// integer datatype.
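// For example (an illustrative sketch, not part of this patch): an 8-byte
// memcpy whose source strips back to a double* is better rewritten as
//   %tmp = load double* %src, align 8
//   store double %tmp, double* %dst, align 8
// than as the equivalent i64 load+store, because mem2reg can then promote
// the double slots directly.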
- Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
- if (StrippedDest != MI->getOperand(1)) {
+ Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
+ if (StrippedDest != MI->getArgOperand(0)) {
const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
if (!II) return visitCallSite(&CI);
-
+
// Intrinsics cannot occur in an invoke, so handle them here instead of in
// visitCallSite.
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
if (!TD) break;
const Type *ReturnTy = CI.getType();
- bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);
+ bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
// Get to the real allocated thing and offset as fast as possible.
- Value *Op1 = II->getOperand(1)->stripPointerCasts();
+ Value *Op1 = II->getArgOperand(0)->stripPointerCasts();
// If we've stripped down to a single global variable that we
// can know the size of, then just return that.
Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
return ReplaceInstUsesWith(CI, RetVal);
-
}
// Do not return "I don't know" here. Later optimization passes could
}
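// Illustrative objectsize fold (assumed IR shapes, not from this patch):
// given '@g = global [10 x i8] ...', the call
//   %n = call i32 @llvm.objectsize.i32(i8* getelementptr ([10 x i8]* @g, i32 0, i32 3), i1 false)
// has Size 10 and Offset 3, so %n folds to the constant 7.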
case Intrinsic::bswap:
// bswap(bswap(x)) -> x
- if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
+ if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
if (Operand->getIntrinsicID() == Intrinsic::bswap)
- return ReplaceInstUsesWith(CI, Operand->getOperand(1));
+ return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));
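// Illustrative IR for the fold above (sketch, not from this patch):
//   %a = call i32 @llvm.bswap.i32(i32 %x)
//   %b = call i32 @llvm.bswap.i32(i32 %a)
// all uses of %b are simply replaced with %x.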
// bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
- if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
+ if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
if (Operand->getIntrinsicID() == Intrinsic::bswap) {
unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
TI->getType()->getPrimitiveSizeInBits();
Value *CV = ConstantInt::get(Operand->getType(), C);
- Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
+ Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
return new TruncInst(V, TI->getType());
}
}
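// Why the shift amount is right (illustrative, i32 -> i16 case): C is
// 32 - 16 = 16. If %x has bytes B3B2B1B0, the inner bswap gives B0B1B2B3,
// the trunc keeps the low half B2B3, and the outer bswap yields B3B2,
// which is exactly the low half of lshr(%x, 16).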
break;
case Intrinsic::powi:
- if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
+ if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// powi(x, 0) -> 1.0
if (Power->isZero())
return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
// powi(x, 1) -> x
if (Power->isOne())
- return ReplaceInstUsesWith(CI, II->getOperand(1));
+ return ReplaceInstUsesWith(CI, II->getArgOperand(0));
// powi(x, -1) -> 1/x
if (Power->isAllOnesValue())
return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
- II->getOperand(1));
+ II->getArgOperand(0));
}
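// e.g. (illustrative) 'call double @llvm.powi.f64(double %x, i32 -1)'
// becomes 'fdiv double 1.000000e+00, %x', a single division instead of
// a libcall or an expanded multiply chain.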
break;
case Intrinsic::cttz: {
// If all bits below the first known one are known zero,
// this value is constant.
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
+ const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
+ ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
KnownZero, KnownOne);
unsigned TrailingZeros = KnownOne.countTrailingZeros();
APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
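// Illustrative (assumed value, not from this patch): if %x is
// 'or (shl %y, 3), 8', bits 0-2 are known zero and bit 3 is known one,
// so TrailingZeros is 3 and cttz(%x) folds to the constant 3.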
case Intrinsic::ctlz: {
// If all bits above the first known one are known zero,
// this value is constant.
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
+ const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
- ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
+ ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
KnownZero, KnownOne);
unsigned LeadingZeros = KnownOne.countLeadingZeros();
APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
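// Symmetric illustration (assumed value): for an i32 %x of the form
// 'or (lshr %y, 28), 8', bits 4-31 are known zero and bit 3 is known one,
// so LeadingZeros is 28 and ctlz(%x) folds to the constant 28.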
}
break;
case Intrinsic::uadd_with_overflow: {
- Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
- const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
+ Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
+ const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt Mask = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0);
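// (Sketch of the intent; the checks themselves are elided here: if the
// sign bit of both operands is known zero, the unsigned add can never
// carry out, and if both are known one it always does, so the overflow
// bit becomes a compile-time constant either way.)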
ConstantInt::getFalse(II->getContext())
};
Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(1), 0);
+ return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
}
break;
// X * 1 -> {X, false}
if (RHSI->equalsInt(1)) {
Constant *V[] = {
- UndefValue::get(II->getOperand(1)->getType()),
+ UndefValue::get(II->getArgOperand(0)->getType()),
ConstantInt::getFalse(II->getContext())
};
Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
- return InsertValueInst::Create(Struct, II->getOperand(1), 0);
+ return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
}
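// (Illustrative result, e.g. for i32 operands: the pair above builds the
// aggregate as 'insertvalue {i32, i1} {i32 undef, i1 false}, i32 %X, 0',
// producing the {X, false} answer without keeping the multiply around.)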
}
break;
case Intrinsic::x86_sse2_loadu_dq:
// Turn PPC lvx -> load if the pointer is known aligned.
// Turn X86 loadups -> load if the pointer is known aligned.
- if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
- Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
+ if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
+ Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
}
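// Illustrative result (assumed IR, not from this patch): once the pointer
// is known 16-byte aligned,
//   %p = bitcast i8* %ptr to <16 x i8>*
//   %v = load <16 x i8>* %p
// replaces the unaligned-load intrinsic with a plain load that the rest
// of the optimizer understands.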
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
- if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
+ if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
// Check that all of the elements are integer constants or undefs.
if (AllEltsOk) {
// Cast the input vectors to byte vectors.
- Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
- Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
+ Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0), Mask->getType());
+ Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1), Mask->getType());
Value *Result = UndefValue::get(Op0->getType());
// Only extract each element once.
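// Illustrative end state (sketch): with a constant mask, each of the 16
// result bytes is extracted from the 32-byte concatenation of Op0 and Op1
// and reinserted into Result, building the shuffle described above one
// element at a time.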
case Intrinsic::stackrestore: {
// If the save is right next to the restore, remove the restore. This can
// happen when variable allocas are DCE'd.
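// e.g. (illustrative IR):
//   %sp = call i8* @llvm.stacksave()
//   call void @llvm.stackrestore(i8* %sp)  ; adjacent, so removable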
- if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
+ if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
if (SS->getIntrinsicID() == Intrinsic::stacksave) {
BasicBlock::iterator BI = SS;
if (&*++BI == II)
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
CS.getInstruction());
- // If CS dues not return void then replaceAllUsesWith undef.
+ // If CS does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust themselves.
if (!CS.getInstruction()->getType()->isVoidTy())
CS.getInstruction()->
IntrinsicInst *Tramp =
cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
- Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
+ Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
do {
if (Idx == NestIdx) {
// Add the chain argument and attributes.
- Value *NestVal = Tramp->getOperand(3);
+ Value *NestVal = Tramp->getArgOperand(2);
if (NestVal->getType() != NestTy)
NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
NewArgs.push_back(NestVal);
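// (Context for the operand indices above, assumed from the intrinsic's
// signature: llvm.init.trampoline's arg operands are the trampoline
// storage, the nested function, and the nest value, hence
// getArgOperand(1) for NestF and getArgOperand(2) for NestVal.)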