#include "llvm/Pass.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
+#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
+#include <set>
using namespace llvm;
using namespace llvm::PatternMatch;
: public FunctionPass,
public InstVisitor<InstCombiner, Instruction*> {
// Worklist of all of the instructions that need to be simplified.
- std::vector<Instruction*> WorkList;
+ std::vector<Instruction*> Worklist;
+ DenseMap<Instruction*, unsigned> WorklistMap;
TargetData *TD;
+ bool MustPreserveLCSSA;
+ public:
+  /// AddToWorkList - Add the specified instruction to the worklist if it
+  /// isn't already in it.
+  void AddToWorkList(Instruction *I) {
+    // WorklistMap records the vector index of each pending instruction so
+    // that RemoveFromWorkList can null out the slot in O(1) instead of
+    // scanning the whole vector.
+    if (WorklistMap.insert(std::make_pair(I, Worklist.size())))
+      Worklist.push_back(I);
+  }
+
+  /// RemoveFromWorkList - Remove I from the worklist if it is present.
+  void RemoveFromWorkList(Instruction *I) {
+    DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
+    if (It == WorklistMap.end()) return; // Not in worklist.
+
+    // Don't bother moving everything down, just null out the slot.  The
+    // worklist consumer must therefore tolerate null entries.
+    Worklist[It->second] = 0;
+
+    WorklistMap.erase(It);
+  }
+
+  /// RemoveOneFromWorkList - Pop and return the most recently added worklist
+  /// entry.  NOTE(review): slots nulled by RemoveFromWorkList remain in the
+  /// vector, so this may return a null pointer; the caller is expected to
+  /// check for that.
+  Instruction *RemoveOneFromWorkList() {
+    Instruction *I = Worklist.back();
+    Worklist.pop_back();
+    WorklistMap.erase(I);  // If I was a nulled slot, its map entry is already gone.
+    return I;
+  }
+
/// AddUsersToWorkList - When an instruction is simplified, add all users of
/// the instruction to the work lists because they might get more simplified
/// now.
void AddUsersToWorkList(Value &I) {
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
UI != UE; ++UI)
- WorkList.push_back(cast<Instruction>(*UI));
+ AddToWorkList(cast<Instruction>(*UI));
}
/// AddUsesToWorkList - When an instruction is simplified, add operands to
void AddUsesToWorkList(Instruction &I) {
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
if (Instruction *Op = dyn_cast<Instruction>(I.getOperand(i)))
- WorkList.push_back(Op);
+ AddToWorkList(Op);
}
/// AddSoonDeadInstToWorklist - The specified instruction is about to become
for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
if (Instruction *Op = dyn_cast<Instruction>(I.getOperand(i))) {
- WorkList.push_back(Op);
+ AddToWorkList(Op);
// Set the operand to undef to drop the use.
I.setOperand(i, UndefValue::get(Op->getType()));
}
return R;
}
- // removeFromWorkList - remove all instances of I from the worklist.
- void removeFromWorkList(Instruction *I);
public:
virtual bool runOnFunction(Function &F);
+
+ bool DoOneIteration(Function &F, unsigned ItNum);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetData>();
Instruction *visitAnd(BinaryOperator &I);
Instruction *visitOr (BinaryOperator &I);
Instruction *visitXor(BinaryOperator &I);
+ Instruction *visitShl(BinaryOperator &I);
+ Instruction *visitAShr(BinaryOperator &I);
+ Instruction *visitLShr(BinaryOperator &I);
+ Instruction *commonShiftTransforms(BinaryOperator &I);
Instruction *visitFCmpInst(FCmpInst &I);
Instruction *visitICmpInst(ICmpInst &I);
Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
Instruction *FoldGEPICmp(User *GEPLHS, Value *RHS,
ICmpInst::Predicate Cond, Instruction &I);
- Instruction *visitShiftInst(ShiftInst &I);
Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
- ShiftInst &I);
+ BinaryOperator &I);
Instruction *commonCastTransforms(CastInst &CI);
Instruction *commonIntCastTransforms(CastInst &CI);
Instruction *visitTrunc(CastInst &CI);
"New instruction already inserted into a basic block!");
BasicBlock *BB = Old.getParent();
BB->getInstList().insert(&Old, New); // Insert inst
- WorkList.push_back(New); // Add to worklist
+ AddToWorkList(New);
return New;
}
return ConstantExpr::getCast(opc, CV, Ty);
Instruction *C = CastInst::create(opc, V, Ty, V->getName(), &Pos);
- WorkList.push_back(C);
+ AddToWorkList(C);
return C;
}
if (Old != New)
Old->replaceAllUsesWith(New);
if (Instruction *I = dyn_cast<Instruction>(Old))
- WorkList.push_back(I);
+ AddToWorkList(I);
if (Instruction *I = dyn_cast<Instruction>(New))
- WorkList.push_back(I);
+ AddToWorkList(I);
return true;
}
Instruction *EraseInstFromFunction(Instruction &I) {
assert(I.use_empty() && "Cannot erase instruction that is used!");
AddUsesToWorkList(I);
- removeFromWorkList(&I);
+ RemoveFromWorkList(&I);
I.eraseFromParent();
return 0; // Don't do anything with FI
}
/// This function is a wrapper around CastInst::isEliminableCastPair. It
/// simply extracts arguments and returns what that function returns.
-/// @Determine if it is valid to eliminate a Convert pair
static Instruction::CastOps
isEliminableCastPair(
const CastInst *CI, ///< The first cast instruction
Instruction *New = BinaryOperator::create(Opcode, Op->getOperand(0),
Op1->getOperand(0),
Op1->getName(), &I);
- WorkList.push_back(New);
+ AddToWorkList(New);
I.setOperand(0, New);
I.setOperand(1, Folded);
return true;
bool InstCombiner::SimplifyDemandedBits(Value *V, uint64_t DemandedMask,
uint64_t &KnownZero, uint64_t &KnownOne,
unsigned Depth) {
+ const IntegerType *VTy = cast<IntegerType>(V->getType());
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
// We know all of the bits for a constant!
KnownOne = CI->getZExtValue() & DemandedMask;
}
// If this is the root being simplified, allow it to have multiple uses,
// just set the DemandedMask to all bits.
- DemandedMask = cast<IntegerType>(V->getType())->getBitMask();
+ DemandedMask = VTy->getBitMask();
} else if (DemandedMask == 0) { // Not demanding any bits from V.
- if (V != UndefValue::get(V->getType()))
- return UpdateValueUsesWith(V, UndefValue::get(V->getType()));
+ if (V != UndefValue::get(VTy))
+ return UpdateValueUsesWith(V, UndefValue::get(VTy));
return false;
} else if (Depth == 6) { // Limit search depth.
return false;
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false; // Only analyze instructions.
- DemandedMask &= cast<IntegerType>(V->getType())->getBitMask();
+ DemandedMask &= VTy->getBitMask();
uint64_t KnownZero2 = 0, KnownOne2 = 0;
switch (I->getOpcode()) {
// If all of the demanded bits in the inputs are known zeros, return zero.
if ((DemandedMask & (KnownZero|KnownZero2)) == DemandedMask)
- return UpdateValueUsesWith(I, Constant::getNullValue(I->getType()));
+ return UpdateValueUsesWith(I, Constant::getNullValue(VTy));
// If the RHS is a constant, see if we can simplify it.
if (ShrinkDemandedConstant(I, 1, DemandedMask & ~KnownZero2))
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) { // all known
if ((KnownOne & KnownOne2) == KnownOne) {
- Constant *AndC = ConstantInt::get(I->getType(),
- ~KnownOne & DemandedMask);
+ Constant *AndC = ConstantInt::get(VTy, ~KnownOne & DemandedMask);
Instruction *And =
BinaryOperator::createAnd(I->getOperand(0), AndC, "tmp");
InsertNewInstBefore(And, *I);
// Compute the bits in the result that are not present in the input.
const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
uint64_t NotIn = ~SrcTy->getBitMask();
- uint64_t NewBits = cast<IntegerType>(I->getType())->getBitMask() & NotIn;
+ uint64_t NewBits = VTy->getBitMask() & NotIn;
DemandedMask &= SrcTy->getBitMask();
if (SimplifyDemandedBits(I->getOperand(0), DemandedMask,
// Compute the bits in the result that are not present in the input.
const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
uint64_t NotIn = ~SrcTy->getBitMask();
- uint64_t NewBits = cast<IntegerType>(I->getType())->getBitMask() & NotIn;
+ uint64_t NewBits = VTy->getBitMask() & NotIn;
// Get the sign bit for the source type
uint64_t InSignBit = 1ULL << (SrcTy->getPrimitiveSizeInBits()-1);
// convert this into a zero extension.
if ((KnownZero & InSignBit) || (NewBits & ~DemandedMask) == NewBits) {
// Convert to ZExt cast
- CastInst *NewCast = CastInst::create(
- Instruction::ZExt, I->getOperand(0), I->getType(), I->getName(), I);
+ CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName(), I);
return UpdateValueUsesWith(I, NewCast);
} else if (KnownOne & InSignBit) { // Input sign bit known set
KnownOne |= NewBits;
// either.
// Shift the demanded mask up so that it's at the top of the uint64_t.
- unsigned BitWidth = I->getType()->getPrimitiveSizeInBits();
+ unsigned BitWidth = VTy->getPrimitiveSizeInBits();
unsigned NLZ = CountLeadingZeros_64(DemandedMask << (64-BitWidth));
// If the top bit of the output is demanded, demand everything from the
// Compute the new bits that are at the top now.
uint64_t HighBits = (1ULL << ShiftAmt)-1;
- HighBits <<= I->getType()->getPrimitiveSizeInBits() - ShiftAmt;
- uint64_t TypeMask = cast<IntegerType>(I->getType())->getBitMask();
+ HighBits <<= VTy->getBitWidth() - ShiftAmt;
+ uint64_t TypeMask = VTy->getBitMask();
// Unsigned shift right.
if (SimplifyDemandedBits(I->getOperand(0),
(DemandedMask << ShiftAmt) & TypeMask,
// the shift amount is >= the size of the datatype, which is undefined.
if (DemandedMask == 1) {
// Perform the logical shift right.
- Value *NewVal = new ShiftInst(Instruction::LShr, I->getOperand(0),
- I->getOperand(1), I->getName());
+ Value *NewVal = BinaryOperator::createLShr(
+ I->getOperand(0), I->getOperand(1), I->getName());
InsertNewInstBefore(cast<Instruction>(NewVal), *I);
return UpdateValueUsesWith(I, NewVal);
}
// Compute the new bits that are at the top now.
uint64_t HighBits = (1ULL << ShiftAmt)-1;
- HighBits <<= I->getType()->getPrimitiveSizeInBits() - ShiftAmt;
- uint64_t TypeMask = cast<IntegerType>(I->getType())->getBitMask();
+ HighBits <<= VTy->getBitWidth() - ShiftAmt;
+ uint64_t TypeMask = VTy->getBitMask();
// Signed shift right.
if (SimplifyDemandedBits(I->getOperand(0),
(DemandedMask << ShiftAmt) & TypeMask,
KnownOne >>= ShiftAmt;
// Handle the sign bits.
- uint64_t SignBit = 1ULL << (I->getType()->getPrimitiveSizeInBits()-1);
+ uint64_t SignBit = 1ULL << (VTy->getBitWidth()-1);
SignBit >>= ShiftAmt; // Adjust to where it is now in the mask.
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
if ((KnownZero & SignBit) || (HighBits & ~DemandedMask) == HighBits) {
// Perform the logical shift right.
- Value *NewVal = new ShiftInst(Instruction::LShr, I->getOperand(0),
- SA, I->getName());
+ Value *NewVal = BinaryOperator::createLShr(
+ I->getOperand(0), SA, I->getName());
InsertNewInstBefore(cast<Instruction>(NewVal), *I);
return UpdateValueUsesWith(I, NewVal);
} else if (KnownOne & SignBit) { // New bits are known one.
// If the client is only demanding bits that we know, return the known
// constant.
if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
- return UpdateValueUsesWith(I, ConstantInt::get(I->getType(), KnownOne));
+ return UpdateValueUsesWith(I, ConstantInt::get(VTy, KnownOne));
return false;
}
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, uint64_t DemandedElts,
uint64_t &UndefElts,
unsigned Depth) {
- unsigned VWidth = cast<PackedType>(V->getType())->getNumElements();
+ unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
assert(VWidth <= 64 && "Vector too wide to analyze!");
uint64_t EltMask = ~0ULL >> (64-VWidth);
assert(DemandedElts != EltMask && (DemandedElts & ~EltMask) == 0 &&
}
UndefElts = 0;
- if (ConstantPacked *CP = dyn_cast<ConstantPacked>(V)) {
- const Type *EltTy = cast<PackedType>(V->getType())->getElementType();
+ if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
+ const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Undef = UndefValue::get(EltTy);
std::vector<Constant*> Elts;
}
// If we changed the constant, return it.
- Constant *NewCP = ConstantPacked::get(Elts);
+ Constant *NewCP = ConstantVector::get(Elts);
return NewCP != CP ? NewCP : 0;
} else if (isa<ConstantAggregateZero>(V)) {
- // Simplify the CAZ to a ConstantPacked where the non-demanded elements are
+ // Simplify the CAZ to a ConstantVector where the non-demanded elements are
// set to undef.
- const Type *EltTy = cast<PackedType>(V->getType())->getElementType();
+ const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Zero = Constant::getNullValue(EltTy);
Constant *Undef = UndefValue::get(EltTy);
std::vector<Constant*> Elts;
for (unsigned i = 0; i != VWidth; ++i)
Elts.push_back((DemandedElts & (1ULL << i)) ? Zero : Undef);
UndefElts = DemandedElts ^ EltMask;
- return ConstantPacked::get(Elts);
+ return ConstantVector::get(Elts);
}
if (!V->hasOneUse()) { // Other users may use these bits.
AddRHS(Value *rhs) : RHS(rhs) {}
bool shouldApply(Value *LHS) const { return LHS == RHS; }
Instruction *apply(BinaryOperator &Add) const {
- return new ShiftInst(Instruction::Shl, Add.getOperand(0),
- ConstantInt::get(Type::Int8Ty, 1));
+ return BinaryOperator::createShl(Add.getOperand(0),
+ ConstantInt::get(Add.getType(), 1));
}
};
else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
New = CmpInst::create(CI->getOpcode(), CI->getPredicate(), Op0, Op1,
SO->getName()+".cmp");
- else if (ShiftInst *SI = dyn_cast<ShiftInst>(&I))
- New = new ShiftInst(SI->getOpcode(), Op0, Op1, SO->getName()+".sh");
else {
assert(0 && "Unknown binary instruction type!");
abort();
// Check to see if all of the operands of the PHI are constants. If there is
// one non-constant value, remember the BB it is. If there is more than one
- // bail out.
+ // or if *it* is a PHI, bail out.
BasicBlock *NonConstBB = 0;
for (unsigned i = 0; i != NumPHIValues; ++i)
if (!isa<Constant>(PN->getIncomingValue(i))) {
if (NonConstBB) return 0; // More than one non-const value.
+ if (isa<PHINode>(PN->getIncomingValue(i))) return 0; // Itself a phi.
NonConstBB = PN->getIncomingBlock(i);
// If the incoming non-constant value is in I's block, we have an infinite
}
// Okay, we can do the transformation: create the new PHI node.
- PHINode *NewPN = new PHINode(I.getType(), I.getName());
- I.setName("");
+ PHINode *NewPN = new PHINode(I.getType(), "");
NewPN->reserveOperandSpace(PN->getNumOperands()/2);
InsertNewInstBefore(NewPN, *PN);
+ NewPN->takeName(PN);
// Next, add all of the operands to the PHI.
if (I.getNumOperands() == 2) {
CI->getPredicate(),
PN->getIncomingValue(i), C, "phitmp",
NonConstBB->getTerminator());
- else if (ShiftInst *SI = dyn_cast<ShiftInst>(&I))
- InV = new ShiftInst(SI->getOpcode(),
- PN->getIncomingValue(i), C, "phitmp",
- NonConstBB->getTerminator());
else
assert(0 && "Unknown binop!");
- WorkList.push_back(cast<Instruction>(InV));
+ AddToWorkList(cast<Instruction>(InV));
}
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
InV = CastInst::create(CI->getOpcode(), PN->getIncomingValue(i),
I.getType(), "phitmp",
NonConstBB->getTerminator());
- WorkList.push_back(cast<Instruction>(InV));
+ AddToWorkList(cast<Instruction>(InV));
}
NewPN->addIncoming(InV, PN->getIncomingBlock(i));
}
// See if SimplifyDemandedBits can simplify this. This handles stuff like
// (X & 254)+1 -> (X&254)|1
uint64_t KnownZero, KnownOne;
- if (!isa<PackedType>(I.getType()) &&
+ if (!isa<VectorType>(I.getType()) &&
SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
ConstantInt *XorRHS = 0;
Value *XorLHS = 0;
- if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
+ if (isa<ConstantInt>(RHSC) &&
+ match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
unsigned TySizeBits = I.getType()->getPrimitiveSizeInBits();
int64_t RHSSExt = cast<ConstantInt>(RHSC)->getSExtValue();
uint64_t RHSZExt = cast<ConstantInt>(RHSC)->getZExtValue();
// -(X >>u 31) -> (X >>s 31)
// -(X >>s 31) -> (X >>u 31)
if (C->isNullValue()) {
- if (ShiftInst *SI = dyn_cast<ShiftInst>(Op1))
+ if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1))
if (SI->getOpcode() == Instruction::LShr) {
if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
// Check to see if we are shifting out everything but the sign bit.
if (CU->getZExtValue() ==
SI->getType()->getPrimitiveSizeInBits()-1) {
// Ok, the transformation is safe. Insert AShr.
- return new ShiftInst(Instruction::AShr, SI->getOperand(0), CU,
- SI->getName());
+ return BinaryOperator::create(Instruction::AShr,
+ SI->getOperand(0), CU, SI->getName());
}
}
}
if (CU->getZExtValue() ==
SI->getType()->getPrimitiveSizeInBits()-1) {
// Ok, the transformation is safe. Insert LShr.
- return new ShiftInst(Instruction::LShr, SI->getOperand(0), CU,
- SI->getName());
+ return BinaryOperator::createLShr(
+ SI->getOperand(0), CU, SI->getName());
}
}
}
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
// ((X << C1)*C2) == (X * (C2 << C1))
- if (ShiftInst *SI = dyn_cast<ShiftInst>(Op0))
+ if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
if (SI->getOpcode() == Instruction::Shl)
if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
return BinaryOperator::createMul(SI->getOperand(0),
int64_t Val = (int64_t)cast<ConstantInt>(CI)->getZExtValue();
if (isPowerOf2_64(Val)) { // Replace X*(2^C) with X << C
uint64_t C = Log2_64(Val);
- return new ShiftInst(Instruction::Shl, Op0,
- ConstantInt::get(Type::Int8Ty, C));
+ return BinaryOperator::createShl(Op0,
+ ConstantInt::get(Op0->getType(), C));
}
} else if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1)) {
if (Op1F->isNullValue())
if (isa<ConstantInt>(SCIOp1) &&
isSignBitCheck(SCI->getPredicate(), cast<ConstantInt>(SCIOp1))) {
// Shift the X value right to turn it into "all signbits".
- Constant *Amt = ConstantInt::get(Type::Int8Ty,
+ Constant *Amt = ConstantInt::get(SCIOp0->getType(),
SCOpTy->getPrimitiveSizeInBits()-1);
Value *V =
- InsertNewInstBefore(new ShiftInst(Instruction::AShr, SCIOp0, Amt,
+ InsertNewInstBefore(
+ BinaryOperator::create(Instruction::AShr, SCIOp0, Amt,
BoolCast->getOperand(0)->getName()+
".mask"), I);
if (uint64_t Val = C->getZExtValue()) // Don't break X / 0
if (isPowerOf2_64(Val)) {
uint64_t ShiftAmt = Log2_64(Val);
- return new ShiftInst(Instruction::LShr, Op0,
- ConstantInt::get(Type::Int8Ty, ShiftAmt));
+ return BinaryOperator::createLShr(Op0,
+ ConstantInt::get(Op0->getType(), ShiftAmt));
}
}
// X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
- if (ShiftInst *RHSI = dyn_cast<ShiftInst>(I.getOperand(1))) {
+ if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
if (RHSI->getOpcode() == Instruction::Shl &&
isa<ConstantInt>(RHSI->getOperand(0))) {
uint64_t C1 = cast<ConstantInt>(RHSI->getOperand(0))->getZExtValue();
Constant *C2V = ConstantInt::get(NTy, C2);
N = InsertNewInstBefore(BinaryOperator::createAdd(N, C2V, "tmp"), I);
}
- return new ShiftInst(Instruction::LShr, Op0, N);
+ return BinaryOperator::createLShr(Op0, N);
}
}
}
// Compute the shift amounts
unsigned TSA = Log2_64(TVA), FSA = Log2_64(FVA);
// Construct the "on true" case of the select
- Constant *TC = ConstantInt::get(Type::Int8Ty, TSA);
- Instruction *TSI =
- new ShiftInst(Instruction::LShr, Op0, TC, SI->getName()+".t");
+ Constant *TC = ConstantInt::get(Op0->getType(), TSA);
+ Instruction *TSI = BinaryOperator::createLShr(
+ Op0, TC, SI->getName()+".t");
TSI = InsertNewInstBefore(TSI, I);
// Construct the "on false" case of the select
- Constant *FC = ConstantInt::get(Type::Int8Ty, FSA);
- Instruction *FSI =
- new ShiftInst(Instruction::LShr, Op0, FC, SI->getName()+".f");
+ Constant *FC = ConstantInt::get(Op0->getType(), FSA);
+ Instruction *FSI = BinaryOperator::createLShr(
+ Op0, FC, SI->getName()+".f");
FSI = InsertNewInstBefore(FSI, I);
// construct the select instruction and return it.
unsigned Zeros = CountTrailingZeros_64(RHS->getZExtValue());
if (Zeros != V->getType()->getPrimitiveSizeInBits())
return ConstantExpr::getShl(Result,
- ConstantInt::get(Type::Int8Ty, Zeros));
+ ConstantInt::get(Result->getType(), Zeros));
}
} else if (CastInst *CI = dyn_cast<CastInst>(I)) {
// Only handle int->int casts.
// OptAndOp - This handles expressions of the form ((val OP C1) & C2). Where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
-// guaranteed to be either a shift instruction or a binary operator.
+// guaranteed to be a binary operator.
Instruction *InstCombiner::OptAndOp(Instruction *Op,
ConstantInt *OpRHS,
ConstantInt *AndRHS,
BinaryOperator &TheAnd) {
Value *X = Op->getOperand(0);
Constant *Together = 0;
- if (!isa<ShiftInst>(Op))
+ if (!Op->isShift())
Together = ConstantExpr::getAnd(AndRHS, OpRHS);
switch (Op->getOpcode()) {
case Instruction::Xor:
if (Op->hasOneUse()) {
// (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
- std::string OpName = Op->getName(); Op->setName("");
- Instruction *And = BinaryOperator::createAnd(X, AndRHS, OpName);
+ Instruction *And = BinaryOperator::createAnd(X, AndRHS);
InsertNewInstBefore(And, TheAnd);
+ And->takeName(Op);
return BinaryOperator::createXor(And, Together);
}
break;
if (Op->hasOneUse() && Together != OpRHS) {
// (X | C1) & C2 --> (X | (C1&C2)) & C2
- std::string Op0Name = Op->getName(); Op->setName("");
- Instruction *Or = BinaryOperator::createOr(X, Together, Op0Name);
+ Instruction *Or = BinaryOperator::createOr(X, Together);
InsertNewInstBefore(Or, TheAnd);
+ Or->takeName(Op);
return BinaryOperator::createAnd(Or, AndRHS);
}
break;
TheAnd.setOperand(0, X);
return &TheAnd;
} else {
- std::string Name = Op->getName(); Op->setName("");
// Pull the XOR out of the AND.
- Instruction *NewAnd = BinaryOperator::createAnd(X, AndRHS, Name);
+ Instruction *NewAnd = BinaryOperator::createAnd(X, AndRHS);
InsertNewInstBefore(NewAnd, TheAnd);
+ NewAnd->takeName(Op);
return BinaryOperator::createXor(NewAnd, AndRHS);
}
}
// (Val ashr C1) & C2 -> (Val lshr C1) & C2
// Make the argument unsigned.
Value *ShVal = Op->getOperand(0);
- ShVal = InsertNewInstBefore(new ShiftInst(Instruction::LShr, ShVal,
- OpRHS, Op->getName()), TheAnd);
+ ShVal = InsertNewInstBefore(
+ BinaryOperator::createLShr(ShVal, OpRHS,
+ Op->getName()), TheAnd);
return BinaryOperator::createAnd(ShVal, AndRHS, TheAnd.getName());
}
}
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
- if (!isa<PackedType>(I.getType())) {
+ if (!isa<VectorType>(I.getType())) {
if (SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
} else {
- if (ConstantPacked *CP = dyn_cast<ConstantPacked>(Op1)) {
+ if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
if (CP->isAllOnesValue())
return ReplaceInstUsesWith(I, I.getOperand(0));
}
uint64_t NotAndRHS = AndRHSMask^TypeMask;
// Optimize a variety of ((val OP C1) & C2) combinations...
- if (isa<BinaryOperator>(Op0) || isa<ShiftInst>(Op0)) {
+ if (isa<BinaryOperator>(Op0)) {
Instruction *Op0I = cast<Instruction>(Op0);
Value *Op0LHS = Op0I->getOperand(0);
Value *Op0RHS = Op0I->getOperand(1);
Instruction *Add = BinaryOperator::createAdd(LHSVal, AddCST,
LHSVal->getName()+".off");
InsertNewInstBefore(Add, I);
- return new ICmpInst(ICmpInst::ICMP_UGT, Add, AddCST);
+ return new ICmpInst(ICmpInst::ICMP_UGT, Add,
+ ConstantInt::get(Add->getType(), 1));
}
break; // (X != 13 & X != 15) -> no change
}
}
// (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
- if (ShiftInst *SI1 = dyn_cast<ShiftInst>(Op1)) {
- if (ShiftInst *SI0 = dyn_cast<ShiftInst>(Op0))
- if (SI0->getOpcode() == SI1->getOpcode() &&
+ if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
+ if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
+ if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
SI0->getOperand(1) == SI1->getOperand(1) &&
(SI0->hasOneUse() || SI1->hasOneUse())) {
Instruction *NewOp =
InsertNewInstBefore(BinaryOperator::createAnd(SI0->getOperand(0),
SI1->getOperand(0),
SI0->getName()), I);
- return new ShiftInst(SI1->getOpcode(), NewOp, SI1->getOperand(1));
+ return BinaryOperator::create(SI1->getOpcode(), NewOp,
+ SI1->getOperand(1));
}
}
/// CollectBSwapParts - Look to see if the specified value defines a single byte
/// in the result. If it does, and if the specified byte hasn't been filled in
/// yet, fill it in and return false.
-static bool CollectBSwapParts(Value *V, std::vector<Value*> &ByteValues) {
+static bool CollectBSwapParts(Value *V, SmallVector<Value*, 8> &ByteValues) {
Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) return true;
// If this is a shift by a constant int, and it is "24", then its operand
// defines a byte. We only handle unsigned types here.
- if (isa<ShiftInst>(I) && isa<ConstantInt>(I->getOperand(1))) {
+ if (I->isShift() && isa<ConstantInt>(I->getOperand(1))) {
// Not shifting the entire input by N-1 bytes?
if (cast<ConstantInt>(I->getOperand(1))->getZExtValue() !=
8*(ByteValues.size()-1))
/// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
/// If so, insert the new bswap intrinsic and return it.
Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
- // We can only handle bswap of unsigned integers, and cannot bswap one byte.
+ // We cannot bswap one byte.
if (I.getType() == Type::Int8Ty)
return 0;
/// ByteValues - For each byte of the result, we keep track of which value
/// defines each byte.
- std::vector<Value*> ByteValues;
+ SmallVector<Value*, 8> ByteValues;
ByteValues.resize(TD->getTypeSize(I.getType()));
// Try to find all the pieces corresponding to the bswap.
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
- if (!isa<PackedType>(I.getType()) &&
+ if (!isa<VectorType>(I.getType()) &&
SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
ConstantInt *C1 = 0; Value *X = 0;
// (X & C1) | C2 --> (X | C2) & (C1|C2)
if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) && isOnlyUse(Op0)) {
- Instruction *Or = BinaryOperator::createOr(X, RHS, Op0->getName());
- Op0->setName("");
+ Instruction *Or = BinaryOperator::createOr(X, RHS);
InsertNewInstBefore(Or, I);
+ Or->takeName(Op0);
return BinaryOperator::createAnd(Or, ConstantExpr::getOr(RHS, C1));
}
// (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) && isOnlyUse(Op0)) {
- std::string Op0Name = Op0->getName(); Op0->setName("");
- Instruction *Or = BinaryOperator::createOr(X, RHS, Op0Name);
+ Instruction *Or = BinaryOperator::createOr(X, RHS);
InsertNewInstBefore(Or, I);
+ Or->takeName(Op0);
return BinaryOperator::createXor(Or,
ConstantExpr::getAnd(C1, ConstantExpr::getNot(RHS)));
}
// (X^C)|Y -> (X|Y)^C iff Y&C == 0
if (Op0->hasOneUse() && match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
MaskedValueIsZero(Op1, C1->getZExtValue())) {
- Instruction *NOr = BinaryOperator::createOr(A, Op1, Op0->getName());
- Op0->setName("");
- return BinaryOperator::createXor(InsertNewInstBefore(NOr, I), C1);
+ Instruction *NOr = BinaryOperator::createOr(A, Op1);
+ InsertNewInstBefore(NOr, I);
+ NOr->takeName(Op0);
+ return BinaryOperator::createXor(NOr, C1);
}
// Y|(X^C) -> (X|Y)^C iff Y&C == 0
if (Op1->hasOneUse() && match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
MaskedValueIsZero(Op0, C1->getZExtValue())) {
- Instruction *NOr = BinaryOperator::createOr(A, Op0, Op1->getName());
- Op0->setName("");
- return BinaryOperator::createXor(InsertNewInstBefore(NOr, I), C1);
+ Instruction *NOr = BinaryOperator::createOr(A, Op0);
+ InsertNewInstBefore(NOr, I);
+ NOr->takeName(Op0);
+ return BinaryOperator::createXor(NOr, C1);
}
// (A & C1)|(B & C2)
}
// (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
- if (ShiftInst *SI1 = dyn_cast<ShiftInst>(Op1)) {
- if (ShiftInst *SI0 = dyn_cast<ShiftInst>(Op0))
- if (SI0->getOpcode() == SI1->getOpcode() &&
+ if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
+ if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
+ if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
SI0->getOperand(1) == SI1->getOperand(1) &&
(SI0->hasOneUse() || SI1->hasOneUse())) {
Instruction *NewOp =
InsertNewInstBefore(BinaryOperator::createOr(SI0->getOperand(0),
SI1->getOperand(0),
SI0->getName()), I);
- return new ShiftInst(SI1->getOpcode(), NewOp, SI1->getOperand(1));
+ return BinaryOperator::create(SI1->getOpcode(), NewOp,
+ SI1->getOperand(1));
}
}
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
uint64_t KnownZero, KnownOne;
- if (!isa<PackedType>(I.getType()) &&
+ if (!isa<VectorType>(I.getType()) &&
SimplifyDemandedBits(&I, cast<IntegerType>(I.getType())->getBitMask(),
KnownZero, KnownOne))
return &I;
Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
NewRHS = ConstantExpr::getAnd(NewRHS,
ConstantExpr::getNot(CommonBits));
- WorkList.push_back(Op0I);
+ AddToWorkList(Op0I);
I.setOperand(0, Op0I->getOperand(0));
I.setOperand(1, NewRHS);
return &I;
}
// (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
- if (ShiftInst *SI1 = dyn_cast<ShiftInst>(Op1)) {
- if (ShiftInst *SI0 = dyn_cast<ShiftInst>(Op0))
- if (SI0->getOpcode() == SI1->getOpcode() &&
+ if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
+ if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
+ if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
SI0->getOperand(1) == SI1->getOperand(1) &&
(SI0->hasOneUse() || SI1->hasOneUse())) {
Instruction *NewOp =
InsertNewInstBefore(BinaryOperator::createXor(SI0->getOperand(0),
SI1->getOperand(0),
SI0->getName()), I);
- return new ShiftInst(SI1->getOpcode(), NewOp, SI1->getOperand(1));
+ return BinaryOperator::create(SI1->getOpcode(), NewOp,
+ SI1->getOperand(1));
}
}
// could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
// happens a LOT in code produced by the C front-end, for bitfield
// access.
- ShiftInst *Shift = dyn_cast<ShiftInst>(LHSI->getOperand(0));
-
- // Check to see if there is a noop-cast between the shift and the and.
- if (!Shift) {
- if (CastInst *CI = dyn_cast<CastInst>(LHSI->getOperand(0)))
- if (CI->getOpcode() == Instruction::BitCast)
- Shift = dyn_cast<ShiftInst>(CI->getOperand(0));
- }
+ BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
+ if (Shift && !Shift->isShift())
+ Shift = 0;
ConstantInt *ShAmt;
ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
int ShAmtVal = Ty->getPrimitiveSizeInBits()-ShAmt->getZExtValue();
if (ShAmtVal < 0) ShAmtVal = 0; // Out of range shift.
- Constant *OShAmt = ConstantInt::get(Type::Int8Ty, ShAmtVal);
+ Constant *OShAmt = ConstantInt::get(AndTy, ShAmtVal);
Constant *ShVal =
ConstantExpr::getShl(ConstantInt::getAllOnesValue(AndTy),
OShAmt);
NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
LHSI->setOperand(1, NewAndCST);
LHSI->setOperand(0, Shift->getOperand(0));
- WorkList.push_back(Shift); // Shift is dead.
+ AddToWorkList(Shift); // Shift is dead.
AddUsesToWorkList(I);
return &I;
}
// Compute C << Y.
Value *NS;
if (Shift->getOpcode() == Instruction::LShr) {
- NS = new ShiftInst(Instruction::Shl, AndCST, Shift->getOperand(1),
- "tmp");
+ NS = BinaryOperator::createShl(AndCST,
+ Shift->getOperand(1), "tmp");
} else {
// Insert a logical shift.
- NS = new ShiftInst(Instruction::LShr, AndCST,
- Shift->getOperand(1), "tmp");
+ NS = BinaryOperator::createLShr(AndCST,
+ Shift->getOperand(1), "tmp");
}
InsertNewInstBefore(cast<Instruction>(NS), I);
else if (Value *NegVal = dyn_castNegVal(BOp0))
return new ICmpInst(I.getPredicate(), NegVal, BOp1);
else if (BO->hasOneUse()) {
- Instruction *Neg = BinaryOperator::createNeg(BOp1, BO->getName());
- BO->setName("");
+ Instruction *Neg = BinaryOperator::createNeg(BOp1);
InsertNewInstBefore(Neg, I);
+ Neg->takeName(BO);
return new ICmpInst(I.getPredicate(), BOp0, Neg);
}
}
default: break;
case Intrinsic::bswap_i16:
// icmp eq (bswap(x)), c -> icmp eq (x,bswap(c))
- WorkList.push_back(II); // Dead?
+ AddToWorkList(II); // Dead?
I.setOperand(0, II->getOperand(1));
I.setOperand(1, ConstantInt::get(Type::Int16Ty,
ByteSwap_16(CI->getZExtValue())));
return &I;
case Intrinsic::bswap_i32:
// icmp eq (bswap(x)), c -> icmp eq (x,bswap(c))
- WorkList.push_back(II); // Dead?
+ AddToWorkList(II); // Dead?
I.setOperand(0, II->getOperand(1));
I.setOperand(1, ConstantInt::get(Type::Int32Ty,
ByteSwap_32(CI->getZExtValue())));
return &I;
case Intrinsic::bswap_i64:
// icmp eq (bswap(x)), c -> icmp eq (x,bswap(c))
- WorkList.push_back(II); // Dead?
+ AddToWorkList(II); // Dead?
I.setOperand(0, II->getOperand(1));
I.setOperand(1, ConstantInt::get(Type::Int64Ty,
ByteSwap_64(CI->getZExtValue())));
ConstantInt *CUI = cast<ConstantInt>(CI);
if (CUI->getZExtValue() == 1ULL << (SrcTySize-1))
return new ICmpInst(ICmpInst::ICMP_SGT, CastOp,
- ConstantInt::get(SrcTy, -1));
+ ConstantInt::get(SrcTy, -1ULL));
break;
}
case ICmpInst::ICMP_UGT: { // X u> 127 => X s< 0
}
}
-Instruction *InstCombiner::visitShiftInst(ShiftInst &I) {
- assert(I.getOperand(1)->getType() == Type::Int8Ty);
+// visitShl - All three shift opcodes share their simplifications; delegate
+// to the common shift handler.
+Instruction *InstCombiner::visitShl(BinaryOperator &I) {
+ return commonShiftTransforms(I);
+}
+
+// visitLShr - Logical shift right: all shift opcodes are simplified by the
+// common shift handler.
+Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
+ return commonShiftTransforms(I);
+}
+
+// visitAShr - Arithmetic shift right: all shift opcodes are simplified by the
+// common shift handler.
+Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
+ return commonShiftTransforms(I);
+}
+
+Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
+ assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
// shl X, 0 == X and shr X, 0 == X
// shl 0, X == 0 and shr 0, X == 0
- if (Op1 == Constant::getNullValue(Type::Int8Ty) ||
+ if (Op1 == Constant::getNullValue(Op1->getType()) ||
Op0 == Constant::getNullValue(Op0->getType()))
return ReplaceInstUsesWith(I, Op0);
if (I.isArithmeticShift()) {
if (MaskedValueIsZero(Op0,
1ULL << (I.getType()->getPrimitiveSizeInBits()-1))) {
- return new ShiftInst(Instruction::LShr, Op0, Op1, I.getName());
+ return BinaryOperator::createLShr(Op0, Op1, I.getName());
}
}
}
Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
- ShiftInst &I) {
+ BinaryOperator &I) {
bool isLeftShift = I.getOpcode() == Instruction::Shl;
- bool isSignedShift = I.getOpcode() == Instruction::AShr;
- bool isUnsignedShift = !isSignedShift;
// See if we can simplify any instructions used by the instruction whose sole
// purpose is to compute bits we don't care about.
//
unsigned TypeBits = Op0->getType()->getPrimitiveSizeInBits();
if (Op1->getZExtValue() >= TypeBits) {
- if (isUnsignedShift || isLeftShift)
+ if (I.getOpcode() != Instruction::AShr)
return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
else {
- I.setOperand(1, ConstantInt::get(Type::Int8Ty, TypeBits-1));
+ I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
return &I;
}
}
case Instruction::Add:
case Instruction::And:
case Instruction::Or:
- case Instruction::Xor:
+ case Instruction::Xor: {
// These operators commute.
// Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
match(Op0BO->getOperand(1),
m_Shr(m_Value(V1), m_ConstantInt(CC))) && CC == Op1) {
- Instruction *YS = new ShiftInst(Instruction::Shl,
+ Instruction *YS = BinaryOperator::createShl(
Op0BO->getOperand(0), Op1,
Op0BO->getName());
InsertNewInstBefore(YS, I); // (Y << C)
}
// Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
- if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
- match(Op0BO->getOperand(1),
- m_And(m_Shr(m_Value(V1), m_Value(V2)),
- m_ConstantInt(CC))) && V2 == Op1 &&
- cast<BinaryOperator>(Op0BO->getOperand(1))->getOperand(0)->hasOneUse()) {
- Instruction *YS = new ShiftInst(Instruction::Shl,
- Op0BO->getOperand(0), Op1,
- Op0BO->getName());
+ Value *Op0BOOp1 = Op0BO->getOperand(1);
+ if (isLeftShift && Op0BOOp1->hasOneUse() && V2 == Op1 &&
+ match(Op0BOOp1,
+ m_And(m_Shr(m_Value(V1), m_Value(V2)),m_ConstantInt(CC))) &&
+ cast<BinaryOperator>(Op0BOOp1)->getOperand(0)-> hasOneUse()) {
+ Instruction *YS = BinaryOperator::createShl(
+ Op0BO->getOperand(0), Op1,
+ Op0BO->getName());
InsertNewInstBefore(YS, I); // (Y << C)
Instruction *XM =
BinaryOperator::createAnd(V1, ConstantExpr::getShl(CC, Op1),
return BinaryOperator::create(Op0BO->getOpcode(), YS, XM);
}
+ }
- // FALL THROUGH.
- case Instruction::Sub:
+ // FALL THROUGH.
+ case Instruction::Sub: {
// Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
match(Op0BO->getOperand(0),
m_Shr(m_Value(V1), m_ConstantInt(CC))) && CC == Op1) {
- Instruction *YS = new ShiftInst(Instruction::Shl,
- Op0BO->getOperand(1), Op1,
- Op0BO->getName());
+ Instruction *YS = BinaryOperator::createShl(
+ Op0BO->getOperand(1), Op1,
+ Op0BO->getName());
InsertNewInstBefore(YS, I); // (Y << C)
Instruction *X =
BinaryOperator::create(Op0BO->getOpcode(), V1, YS,
m_ConstantInt(CC))) && V2 == Op1 &&
cast<BinaryOperator>(Op0BO->getOperand(0))
->getOperand(0)->hasOneUse()) {
- Instruction *YS = new ShiftInst(Instruction::Shl,
- Op0BO->getOperand(1), Op1,
- Op0BO->getName());
+ Instruction *YS = BinaryOperator::createShl(
+ Op0BO->getOperand(1), Op1,
+ Op0BO->getName());
InsertNewInstBefore(YS, I); // (Y << C)
Instruction *XM =
BinaryOperator::createAnd(V1, ConstantExpr::getShl(CC, Op1),
}
break;
+ }
}
// the constant which would cause it to be modified for this
// operation.
//
- if (isValid && !isLeftShift && isSignedShift) {
+ if (isValid && !isLeftShift && I.getOpcode() == Instruction::AShr) {
uint64_t Val = Op0C->getZExtValue();
isValid = ((Val & (1 << (TypeBits-1))) != 0) == highBitSet;
}
Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
Instruction *NewShift =
- new ShiftInst(I.getOpcode(), Op0BO->getOperand(0), Op1,
- Op0BO->getName());
- Op0BO->setName("");
+ BinaryOperator::create(I.getOpcode(), Op0BO->getOperand(0), Op1);
InsertNewInstBefore(NewShift, I);
+ NewShift->takeName(Op0BO);
return BinaryOperator::create(Op0BO->getOpcode(), NewShift,
NewRHS);
}
// Find out if this is a shift of a shift by a constant.
- ShiftInst *ShiftOp = 0;
- if (ShiftInst *Op0SI = dyn_cast<ShiftInst>(Op0))
- ShiftOp = Op0SI;
- else if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
- // If this is a noop-integer cast of a shift instruction, use the shift.
- if (isa<ShiftInst>(CI->getOperand(0))) {
- ShiftOp = cast<ShiftInst>(CI->getOperand(0));
- }
- }
+ BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
+ if (ShiftOp && !ShiftOp->isShift())
+ ShiftOp = 0;
if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
- // Find the operands and properties of the input shift. Note that the
- // signedness of the input shift may differ from the current shift if there
- // is a noop cast between the two.
- bool isShiftOfLeftShift = ShiftOp->getOpcode() == Instruction::Shl;
- bool isShiftOfSignedShift = ShiftOp->getOpcode() == Instruction::AShr;
- bool isShiftOfUnsignedShift = !isShiftOfSignedShift;
-
ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
-
unsigned ShiftAmt1 = (unsigned)ShiftAmt1C->getZExtValue();
unsigned ShiftAmt2 = (unsigned)Op1->getZExtValue();
+ assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
+ if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
+ Value *X = ShiftOp->getOperand(0);
- // Check for (A << c1) << c2 and (A >> c1) >> c2.
- if (isLeftShift == isShiftOfLeftShift) {
- // Do not fold these shifts if the first one is signed and the second one
- // is unsigned and this is a right shift. Further, don't do any folding
- // on them.
- if (isShiftOfSignedShift && isUnsignedShift && !isLeftShift)
- return 0;
-
- unsigned Amt = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
- if (Amt > Op0->getType()->getPrimitiveSizeInBits())
- Amt = Op0->getType()->getPrimitiveSizeInBits();
-
- Value *Op = ShiftOp->getOperand(0);
- ShiftInst *ShiftResult = new ShiftInst(I.getOpcode(), Op,
- ConstantInt::get(Type::Int8Ty, Amt));
- if (I.getType() == ShiftResult->getType())
- return ShiftResult;
- InsertNewInstBefore(ShiftResult, I);
- return CastInst::create(Instruction::BitCast, ShiftResult, I.getType());
+ unsigned AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
+ if (AmtSum > I.getType()->getPrimitiveSizeInBits())
+ AmtSum = I.getType()->getPrimitiveSizeInBits();
+
+ const IntegerType *Ty = cast<IntegerType>(I.getType());
+
+ // Check for (X << c1) << c2 and (X >> c1) >> c2
+ if (I.getOpcode() == ShiftOp->getOpcode()) {
+ return BinaryOperator::create(I.getOpcode(), X,
+ ConstantInt::get(Ty, AmtSum));
+ } else if (ShiftOp->getOpcode() == Instruction::LShr &&
+ I.getOpcode() == Instruction::AShr) {
+ // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
+ return BinaryOperator::createLShr(X, ConstantInt::get(Ty, AmtSum));
+ } else if (ShiftOp->getOpcode() == Instruction::AShr &&
+ I.getOpcode() == Instruction::LShr) {
+ // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
+ Instruction *Shift =
+ BinaryOperator::createAShr(X, ConstantInt::get(Ty, AmtSum));
+ InsertNewInstBefore(Shift, I);
+
+ uint64_t Mask = Ty->getBitMask() >> ShiftAmt2;
+ return BinaryOperator::createAnd(Shift, ConstantInt::get(Ty, Mask));
}
- // Check for (A << c1) >> c2 or (A >> c1) << c2. If we are dealing with
- // signed types, we can only support the (A >> c1) << c2 configuration,
- // because it can not turn an arbitrary bit of A into a sign bit.
- if (isUnsignedShift || isLeftShift) {
- // Calculate bitmask for what gets shifted off the edge.
- Constant *C = ConstantInt::getAllOnesValue(I.getType());
- if (isLeftShift)
- C = ConstantExpr::getShl(C, ShiftAmt1C);
- else
- C = ConstantExpr::getLShr(C, ShiftAmt1C);
-
- Value *Op = ShiftOp->getOperand(0);
+ // Okay, if we get here, one shift must be left, and the other shift must be
+ // right. See if the amounts are equal.
+ if (ShiftAmt1 == ShiftAmt2) {
+ // If we have ((X >>? C) << C), turn this into X & (-1 << C).
+ if (I.getOpcode() == Instruction::Shl) {
+ uint64_t Mask = Ty->getBitMask() << ShiftAmt1;
+ return BinaryOperator::createAnd(X, ConstantInt::get(Ty, Mask));
+ }
+ // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
+ if (I.getOpcode() == Instruction::LShr) {
+ uint64_t Mask = Ty->getBitMask() >> ShiftAmt1;
+ return BinaryOperator::createAnd(X, ConstantInt::get(Ty, Mask));
+ }
+ // We can simplify ((X << C) >>s C) into a trunc + sext.
+ // NOTE: we could do this for any C, but that would make 'unusual' integer
+ // types. For now, just stick to ones well-supported by the code
+ // generators.
+ const Type *SExtType = 0;
+ switch (Ty->getBitWidth() - ShiftAmt1) {
+ case 8 : SExtType = Type::Int8Ty; break;
+ case 16: SExtType = Type::Int16Ty; break;
+ case 32: SExtType = Type::Int32Ty; break;
+ default: break;
+ }
+ if (SExtType) {
+ Instruction *NewTrunc = new TruncInst(X, SExtType, "sext");
+ InsertNewInstBefore(NewTrunc, I);
+ return new SExtInst(NewTrunc, Ty);
+ }
+ // Otherwise, we can't handle it yet.
+ } else if (ShiftAmt1 < ShiftAmt2) {
+ unsigned ShiftDiff = ShiftAmt2-ShiftAmt1;
- Instruction *Mask =
- BinaryOperator::createAnd(Op, C, Op->getName()+".mask");
- InsertNewInstBefore(Mask, I);
+ // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
+ if (I.getOpcode() == Instruction::Shl) {
+ assert(ShiftOp->getOpcode() == Instruction::LShr ||
+ ShiftOp->getOpcode() == Instruction::AShr);
+ Instruction *Shift =
+ BinaryOperator::createShl(X, ConstantInt::get(Ty, ShiftDiff));
+ InsertNewInstBefore(Shift, I);
+
+ uint64_t Mask = Ty->getBitMask() << ShiftAmt2;
+ return BinaryOperator::createAnd(Shift, ConstantInt::get(Ty, Mask));
+ }
- // Figure out what flavor of shift we should use...
- if (ShiftAmt1 == ShiftAmt2) {
- return ReplaceInstUsesWith(I, Mask); // (A << c) >> c === A & c2
- } else if (ShiftAmt1 < ShiftAmt2) {
- return new ShiftInst(I.getOpcode(), Mask,
- ConstantInt::get(Type::Int8Ty, ShiftAmt2-ShiftAmt1));
- } else if (isShiftOfUnsignedShift || isShiftOfLeftShift) {
- if (isShiftOfUnsignedShift && !isShiftOfLeftShift && isSignedShift) {
- return new ShiftInst(Instruction::LShr, Mask,
- ConstantInt::get(Type::Int8Ty, ShiftAmt1-ShiftAmt2));
- } else {
- return new ShiftInst(ShiftOp->getOpcode(), Mask,
- ConstantInt::get(Type::Int8Ty, ShiftAmt1-ShiftAmt2));
- }
- } else {
- // (X >>s C1) << C2 where C1 > C2 === (X >>s (C1-C2)) & mask
+ // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
+ if (I.getOpcode() == Instruction::LShr) {
+ assert(ShiftOp->getOpcode() == Instruction::Shl);
Instruction *Shift =
- new ShiftInst(ShiftOp->getOpcode(), Mask,
- ConstantInt::get(Type::Int8Ty, ShiftAmt1-ShiftAmt2));
+ BinaryOperator::createLShr(X, ConstantInt::get(Ty, ShiftDiff));
InsertNewInstBefore(Shift, I);
- C = ConstantInt::getAllOnesValue(Shift->getType());
- C = ConstantExpr::getShl(C, Op1);
- return BinaryOperator::createAnd(Shift, C, Op->getName()+".mask");
+ uint64_t Mask = Ty->getBitMask() >> ShiftAmt2;
+ return BinaryOperator::createAnd(Shift, ConstantInt::get(Ty, Mask));
}
+
+ // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
} else {
- // We can handle signed (X << C1) >>s C2 if it's a sign extend. In
- // this case, C1 == C2 and C1 is 8, 16, or 32.
- if (ShiftAmt1 == ShiftAmt2) {
- const Type *SExtType = 0;
- switch (Op0->getType()->getPrimitiveSizeInBits() - ShiftAmt1) {
- case 8 : SExtType = Type::Int8Ty; break;
- case 16: SExtType = Type::Int16Ty; break;
- case 32: SExtType = Type::Int32Ty; break;
- }
+ assert(ShiftAmt2 < ShiftAmt1);
+ unsigned ShiftDiff = ShiftAmt1-ShiftAmt2;
+
+ // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
+ if (I.getOpcode() == Instruction::Shl) {
+ assert(ShiftOp->getOpcode() == Instruction::LShr ||
+ ShiftOp->getOpcode() == Instruction::AShr);
+ Instruction *Shift =
+ BinaryOperator::create(ShiftOp->getOpcode(), X,
+ ConstantInt::get(Ty, ShiftDiff));
+ InsertNewInstBefore(Shift, I);
- if (SExtType) {
- Instruction *NewTrunc =
- new TruncInst(ShiftOp->getOperand(0), SExtType, "sext");
- InsertNewInstBefore(NewTrunc, I);
- return new SExtInst(NewTrunc, I.getType());
- }
+ uint64_t Mask = Ty->getBitMask() << ShiftAmt2;
+ return BinaryOperator::createAnd(Shift, ConstantInt::get(Ty, Mask));
+ }
+
+ // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
+ if (I.getOpcode() == Instruction::LShr) {
+ assert(ShiftOp->getOpcode() == Instruction::Shl);
+ Instruction *Shift =
+ BinaryOperator::createShl(X, ConstantInt::get(Ty, ShiftDiff));
+ InsertNewInstBefore(Shift, I);
+
+ uint64_t Mask = Ty->getBitMask() >> ShiftAmt2;
+ return BinaryOperator::createAnd(Shift, ConstantInt::get(Ty, Mask));
}
+
+ // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
}
}
return 0;
// Remove any uses of AI that are dead.
assert(!CI.use_empty() && "Dead instructions should be removed earlier!");
- std::vector<Instruction*> DeadUsers;
+
for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
Instruction *User = cast<Instruction>(*UI++);
if (isInstructionTriviallyDead(User)) {
while (UI != E && *UI == User)
++UI; // If this instruction uses AI more than once, don't break UI.
- // Add operands to the worklist.
- AddUsesToWorkList(*User);
++NumDeadInst;
DOUT << "IC: DCE: " << *User;
-
- User->eraseFromParent();
- removeFromWorkList(User);
+ EraseInstFromFunction(*User);
}
}
const Type *CastElTy = PTy->getElementType();
if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
- unsigned AllocElTyAlign = TD->getTypeAlignmentABI(AllocElTy);
- unsigned CastElTyAlign = TD->getTypeAlignmentABI(CastElTy);
+ unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
+ unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
if (CastElTyAlign < AllocElTyAlign) return 0;
// If the allocation has multiple uses, only promote it if we are strictly
Amt = InsertNewInstBefore(Tmp, AI);
}
- std::string Name = AI.getName(); AI.setName("");
AllocationInst *New;
if (isa<MallocInst>(AI))
- New = new MallocInst(CastElTy, Amt, AI.getAlignment(), Name);
+ New = new MallocInst(CastElTy, Amt, AI.getAlignment());
else
- New = new AllocaInst(CastElTy, Amt, AI.getAlignment(), Name);
+ New = new AllocaInst(CastElTy, Amt, AI.getAlignment());
InsertNewInstBefore(New, AI);
+ New->takeName(&AI);
// If the allocation has multiple uses, insert a cast and change all things
// that used it to use the new cast. This will also hack on CI, but it will
}
/// CanEvaluateInDifferentType - Return true if we can take the specified value
-/// and return it without inserting any new casts. This is used by code that
-/// tries to decide whether promoting or shrinking integer operations to wider
-/// or smaller types will allow us to eliminate a truncate or extend.
-static bool CanEvaluateInDifferentType(Value *V, const Type *Ty,
+/// and return it as type Ty without inserting any new casts and without
+/// changing the computed value. This is used by code that tries to decide
+/// whether promoting or shrinking integer operations to wider or smaller types
+/// will allow us to eliminate a truncate or extend.
+///
+/// This is a truncation operation if Ty is smaller than V->getType(), or an
+/// extension operation if Ty is larger.
+static bool CanEvaluateInDifferentType(Value *V, const IntegerType *Ty,
int &NumCastsRemoved) {
- if (isa<Constant>(V)) return true;
+ // We can always evaluate constants in another type.
+ if (isa<ConstantInt>(V))
+ return true;
Instruction *I = dyn_cast<Instruction>(V);
- if (!I || !I->hasOneUse()) return false;
+ if (!I) return false;
+
+ const IntegerType *OrigTy = cast<IntegerType>(V->getType());
switch (I->getOpcode()) {
+ case Instruction::Add:
+ case Instruction::Sub:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor:
+ if (!I->hasOneUse()) return false;
// These operators can all arbitrarily be extended or truncated.
return CanEvaluateInDifferentType(I->getOperand(0), Ty, NumCastsRemoved) &&
CanEvaluateInDifferentType(I->getOperand(1), Ty, NumCastsRemoved);
- case Instruction::AShr:
- case Instruction::LShr:
+
case Instruction::Shl:
- // If this is just a bitcast changing the sign of the operation, we can
- // convert if the operand can be converted.
- if (V->getType()->getPrimitiveSizeInBits() == Ty->getPrimitiveSizeInBits())
- return CanEvaluateInDifferentType(I->getOperand(0), Ty, NumCastsRemoved);
+ if (!I->hasOneUse()) return false;
+ // If we are truncating the result of this SHL, and if it's a shift of a
+ // constant amount, we can always perform a SHL in a smaller type.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ if (Ty->getBitWidth() < OrigTy->getBitWidth() &&
+ CI->getZExtValue() < Ty->getBitWidth())
+ return CanEvaluateInDifferentType(I->getOperand(0), Ty,NumCastsRemoved);
+ }
+ break;
+ case Instruction::LShr:
+ if (!I->hasOneUse()) return false;
+ // If this is a truncate of a logical shr, we can truncate it to a smaller
+ // lshr iff we know that the bits we would otherwise be shifting in are
+ // already zeros.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ if (Ty->getBitWidth() < OrigTy->getBitWidth() &&
+ MaskedValueIsZero(I->getOperand(0),
+ OrigTy->getBitMask() & ~Ty->getBitMask()) &&
+ CI->getZExtValue() < Ty->getBitWidth()) {
+ return CanEvaluateInDifferentType(I->getOperand(0), Ty, NumCastsRemoved);
+ }
+ }
break;
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
- case Instruction::BitCast:
// If this is a cast from the destination type, we can trivially eliminate
// it, and this will remove a cast overall.
if (I->getOperand(0)->getType() == Ty) {
/// CanEvaluateInDifferentType returns true for, actually insert the code to
/// evaluate the expression.
Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
- bool isSigned ) {
+ bool isSigned) {
if (Constant *C = dyn_cast<Constant>(V))
return ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
Instruction *I = cast<Instruction>(V);
Instruction *Res = 0;
switch (I->getOpcode()) {
+ case Instruction::Add:
+ case Instruction::Sub:
case Instruction::And:
case Instruction::Or:
- case Instruction::Xor: {
- Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
- Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
- Res = BinaryOperator::create((Instruction::BinaryOps)I->getOpcode(),
- LHS, RHS, I->getName());
- break;
- }
+ case Instruction::Xor:
case Instruction::AShr:
case Instruction::LShr:
case Instruction::Shl: {
Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
- Res = new ShiftInst((Instruction::OtherOps)I->getOpcode(), LHS,
- I->getOperand(1), I->getName());
+ Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
+ Res = BinaryOperator::create((Instruction::BinaryOps)I->getOpcode(),
+ LHS, RHS, I->getName());
break;
}
case Instruction::Trunc:
return 0;
}
-/// Only the TRUNC, ZEXT, SEXT, and BITCONVERT can have both operands as
-/// integers. This function implements the common transforms for all those
+/// Only the TRUNC, ZEXT, SEXT, and BITCAST can have both operand and result as
+/// integer types. This function implements the common transforms for all those
/// cases.
/// @brief Implement the transforms common to CastInst with integer operands
Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
if (!SrcI || !Src->hasOneUse())
return 0;
- // Attempt to propagate the cast into the instruction.
+ // Attempt to propagate the cast into the instruction for int->int casts.
int NumCastsRemoved = 0;
- if (CanEvaluateInDifferentType(SrcI, DestTy, NumCastsRemoved)) {
+ if (!isa<BitCastInst>(CI) &&
+ CanEvaluateInDifferentType(SrcI, cast<IntegerType>(DestTy),
+ NumCastsRemoved)) {
// If this cast is a truncate, evaluting in a different type always
// eliminates the cast, so it is always a win. If this is a noop-cast
// this just removes a noop cast which isn't pointful, but simplifies
// the input have eliminated at least one cast. If this is a sign
// extension, we insert two new casts (to do the extension) so we
// require that two casts have been eliminated.
- bool DoXForm = CI.isNoopCast(TD->getIntPtrType());
- if (!DoXForm) {
- switch (CI.getOpcode()) {
- case Instruction::Trunc:
- DoXForm = true;
- break;
- case Instruction::ZExt:
- DoXForm = NumCastsRemoved >= 1;
- break;
- case Instruction::SExt:
- DoXForm = NumCastsRemoved >= 2;
- break;
- case Instruction::BitCast:
- DoXForm = false;
- break;
- default:
- // All the others use floating point so we shouldn't actually
- // get here because of the check above.
- assert(!"Unknown cast type .. unreachable");
- break;
- }
+ bool DoXForm;
+ switch (CI.getOpcode()) {
+ default:
+ // All the others use floating point so we shouldn't actually
+ // get here because of the check above.
+ assert(0 && "Unknown cast type");
+ case Instruction::Trunc:
+ DoXForm = true;
+ break;
+ case Instruction::ZExt:
+ DoXForm = NumCastsRemoved >= 1;
+ break;
+ case Instruction::SExt:
+ DoXForm = NumCastsRemoved >= 2;
+ break;
+ case Instruction::BitCast:
+ DoXForm = false;
+ break;
}
if (DoXForm) {
Instruction::CastOps opcode = (DestBitSize == SrcBitSize ?
Instruction::BitCast : Instruction::Trunc);
Value *Op0c = InsertOperandCastBefore(opcode, Op0, DestTy, SrcI);
- return new ShiftInst(Instruction::Shl, Op0c, Op1);
+ Value *Op1c = InsertOperandCastBefore(opcode, Op1, DestTy, SrcI);
+ return BinaryOperator::createShl(Op0c, Op1c);
}
break;
case Instruction::AShr:
unsigned ShiftAmt = cast<ConstantInt>(Op1)->getZExtValue();
if (SrcBitSize > ShiftAmt && SrcBitSize-ShiftAmt >= DestBitSize) {
// Insert the new logical shift right.
- return new ShiftInst(Instruction::LShr, Op0, Op1);
+ return BinaryOperator::createLShr(Op0, Op1);
}
}
break;
// Perform a logical shr by shiftamt.
// Insert the shift to put the result in the low bit.
In = InsertNewInstBefore(
- new ShiftInst(Instruction::LShr, In,
- ConstantInt::get(Type::Int8Ty, ShiftAmt),
- In->getName()+".lobit"), CI);
+ BinaryOperator::createLShr(In,
+ ConstantInt::get(In->getType(), ShiftAmt),
+ In->getName()+".lobit"), CI);
}
if ((Op1CV != 0) == isNE) { // Toggle the low bit.
// Okay, we can shrink this. Truncate the input, then return a new
// shift.
- Value *V = InsertCastBefore(Instruction::Trunc, SrcIOp0, Ty, CI);
- return new ShiftInst(Instruction::LShr, V, SrcI->getOperand(1));
+ Value *V1 = InsertCastBefore(Instruction::Trunc, SrcIOp0, Ty, CI);
+ Value *V2 = InsertCastBefore(Instruction::Trunc, SrcI->getOperand(1),
+ Ty, CI);
+ return BinaryOperator::createLShr(V1, V2);
}
} else { // This is a variable shr.
if (CI.getType() == Type::Int1Ty && SrcI->hasOneUse()) {
Value *One = ConstantInt::get(SrcI->getType(), 1);
- Value *V = InsertNewInstBefore(new ShiftInst(Instruction::Shl, One,
- SrcI->getOperand(1),
- "tmp"), CI);
+ Value *V = InsertNewInstBefore(
+ BinaryOperator::createShl(One, SrcI->getOperand(1),
+ "tmp"), CI);
V = InsertNewInstBefore(BinaryOperator::createAnd(V,
SrcI->getOperand(0),
"tmp"), CI);
// If we found a path from the src to dest, create the getelementptr now.
if (SrcElTy == DstElTy) {
- std::vector<Value*> Idxs(NumZeros+1, ZeroUInt);
- return new GetElementPtrInst(Src, Idxs);
+ SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
+ return new GetElementPtrInst(Src, &Idxs[0], Idxs.size());
}
}
}
if (SVI->hasOneUse()) {
// Okay, we have (bitconvert (shuffle ..)). Check to see if this is
// a bitconvert to a vector with the same # elts.
- if (isa<PackedType>(DestTy) &&
- cast<PackedType>(DestTy)->getNumElements() ==
+ if (isa<VectorType>(DestTy) &&
+ cast<VectorType>(DestTy)->getNumElements() ==
SVI->getType()->getNumElements()) {
CastInst *Tmp;
// If either of the operands is a cast from CI.getType(), then
case Instruction::Sub:
case Instruction::Or:
case Instruction::Xor:
- return Constant::getNullValue(I->getType());
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
- return Constant::getNullValue(Type::Int8Ty);
+ return Constant::getNullValue(I->getType());
case Instruction::And:
return ConstantInt::getAllOnesValue(I->getType());
case Instruction::Mul:
TI->getType());
}
- // Only handle binary, compare and shift operators here.
- if (!isa<ShiftInst>(TI) && !isa<BinaryOperator>(TI))
+ // Only handle binary operators here.
+ if (!isa<BinaryOperator>(TI))
return 0;
// Figure out if the operations have any operands in common.
else
return BinaryOperator::create(BO->getOpcode(), NewSI, MatchOp);
}
-
- assert(isa<ShiftInst>(TI) && "Should only have Shift here");
- if (MatchIsOpZero)
- return new ShiftInst(cast<ShiftInst>(TI)->getOpcode(), MatchOp, NewSI);
- else
- return new ShiftInst(cast<ShiftInst>(TI)->getOpcode(), NewSI, MatchOp);
+ assert(0 && "Shouldn't get here");
+ return 0;
}
Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// same width. Make an all-ones value by inserting a AShr.
Value *X = IC->getOperand(0);
unsigned Bits = X->getType()->getPrimitiveSizeInBits();
- Constant *ShAmt = ConstantInt::get(Type::Int8Ty, Bits-1);
- Instruction *SRA = new ShiftInst(Instruction::AShr, X,
- ShAmt, "ones");
+ Constant *ShAmt = ConstantInt::get(X->getType(), Bits-1);
+ Instruction *SRA = BinaryOperator::create(Instruction::AShr, X,
+ ShAmt, "ones");
InsertNewInstBefore(SRA, SI);
// Finally, convert to the type of the select RHS. We figure out
if (OpToFold) {
Constant *C = GetSelectFoldableConstant(TVI);
- std::string Name = TVI->getName(); TVI->setName("");
Instruction *NewSel =
- new SelectInst(SI.getCondition(), TVI->getOperand(2-OpToFold), C,
- Name);
+ new SelectInst(SI.getCondition(), TVI->getOperand(2-OpToFold), C);
InsertNewInstBefore(NewSel, SI);
+ NewSel->takeName(TVI);
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TVI))
return BinaryOperator::create(BO->getOpcode(), FalseVal, NewSel);
- else if (ShiftInst *SI = dyn_cast<ShiftInst>(TVI))
- return new ShiftInst(SI->getOpcode(), FalseVal, NewSel);
else {
assert(0 && "Unknown instruction!!");
}
if (OpToFold) {
Constant *C = GetSelectFoldableConstant(FVI);
- std::string Name = FVI->getName(); FVI->setName("");
Instruction *NewSel =
- new SelectInst(SI.getCondition(), C, FVI->getOperand(2-OpToFold),
- Name);
+ new SelectInst(SI.getCondition(), C, FVI->getOperand(2-OpToFold));
InsertNewInstBefore(NewSel, SI);
+ NewSel->takeName(FVI);
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FVI))
return BinaryOperator::create(BO->getOpcode(), TrueVal, NewSel);
- else if (ShiftInst *SI = dyn_cast<ShiftInst>(FVI))
- return new ShiftInst(SI->getOpcode(), TrueVal, NewSel);
- else {
+ else
assert(0 && "Unknown instruction!!");
- }
}
}
}
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
unsigned Align = GV->getAlignment();
if (Align == 0 && TD)
- Align = TD->getTypeAlignmentPref(GV->getType()->getElementType());
+ Align = TD->getPrefTypeAlignment(GV->getType()->getElementType());
return Align;
} else if (AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
unsigned Align = AI->getAlignment();
if (Align == 0 && TD) {
if (isa<AllocaInst>(AI))
- Align = TD->getTypeAlignmentPref(AI->getType()->getElementType());
+ Align = TD->getPrefTypeAlignment(AI->getType()->getElementType());
else if (isa<MallocInst>(AI)) {
// Malloc returns maximally aligned memory.
- Align = TD->getTypeAlignmentABI(AI->getType()->getElementType());
+ Align = TD->getABITypeAlignment(AI->getType()->getElementType());
Align =
std::max(Align,
- (unsigned)TD->getTypeAlignmentABI(Type::DoubleTy));
+ (unsigned)TD->getABITypeAlignment(Type::DoubleTy));
Align =
std::max(Align,
- (unsigned)TD->getTypeAlignmentABI(Type::Int64Ty));
+ (unsigned)TD->getABITypeAlignment(Type::Int64Ty));
}
}
return Align;
const Type *BasePtrTy = GEPI->getOperand(0)->getType();
const PointerType *PtrTy = cast<PointerType>(BasePtrTy);
- if (TD->getTypeAlignmentABI(PtrTy->getElementType())
+ if (TD->getABITypeAlignment(PtrTy->getElementType())
<= BaseAlignment) {
const Type *GEPTy = GEPI->getType();
const PointerType *GEPPtrTy = cast<PointerType>(GEPTy);
- return TD->getTypeAlignmentABI(GEPPtrTy->getElementType());
+ return TD->getABITypeAlignment(GEPPtrTy->getElementType());
}
return 0;
}
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
- if (ConstantPacked *Mask = dyn_cast<ConstantPacked>(II->getOperand(3))) {
+ if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
// Check that all of the elements are integer constants or undefs.
// Check to see if we are changing the return type...
if (OldRetTy != FT->getReturnType()) {
- if (Callee->isExternal() && !Caller->use_empty() &&
+ if (Callee->isDeclaration() && !Caller->use_empty() &&
OldRetTy != FT->getReturnType() &&
// Conversion is ok if changing from pointer to int of same size.
!(isa<PointerType>(FT->getReturnType()) &&
ParamTy->getPrimitiveSizeInBits() >= ActTy->getPrimitiveSizeInBits()) ||
(c && ParamTy->getPrimitiveSizeInBits() >= ActTy->getPrimitiveSizeInBits()
&& c->getSExtValue() > 0);
- if (Callee->isExternal() && !isConvertible) return false;
+ if (Callee->isDeclaration() && !isConvertible) return false;
}
if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
- Callee->isExternal())
+ Callee->isDeclaration())
return false; // Do not delete arguments unless we have a function body...
// Okay, we decided that this is a safe thing to do: go ahead and start
}
if (FT->getReturnType() == Type::VoidTy)
- Caller->setName(""); // Void type should not have a name...
+ Caller->setName(""); // Void type should not have a name.
Instruction *NC;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
NC = new InvokeInst(Callee, II->getNormalDest(), II->getUnwindDest(),
- Args, Caller->getName(), Caller);
+ &Args[0], Args.size(), Caller->getName(), Caller);
cast<InvokeInst>(II)->setCallingConv(II->getCallingConv());
} else {
- NC = new CallInst(Callee, Args, Caller->getName(), Caller);
+ NC = new CallInst(Callee, &Args[0], Args.size(), Caller->getName(), Caller);
if (cast<CallInst>(Caller)->isTailCall())
cast<CallInst>(NC)->setTailCall();
cast<CallInst>(NC)->setCallingConv(cast<CallInst>(Caller)->getCallingConv());
}
- // Insert a cast of the return type as necessary...
+ // Insert a cast of the return type as necessary.
Value *NV = NC;
if (Caller->getType() != NV->getType() && !Caller->use_empty()) {
if (NV->getType() != Type::VoidTy) {
if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
Caller->replaceAllUsesWith(NV);
- Caller->getParent()->getInstList().erase(Caller);
- removeFromWorkList(Caller);
+ Caller->eraseFromParent();
+ RemoveFromWorkList(Caller);
return true;
}
/// and a single binop.
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
- assert(isa<BinaryOperator>(FirstInst) || isa<ShiftInst>(FirstInst) ||
- isa<GetElementPtrInst>(FirstInst) || isa<CmpInst>(FirstInst));
+ assert(isa<BinaryOperator>(FirstInst) || isa<GetElementPtrInst>(FirstInst) ||
+ isa<CmpInst>(FirstInst));
unsigned Opc = FirstInst->getOpcode();
Value *LHSVal = FirstInst->getOperand(0);
Value *RHSVal = FirstInst->getOperand(1);
else if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
return CmpInst::create(CIOp->getOpcode(), CIOp->getPredicate(), LHSVal,
RHSVal);
- else if (ShiftInst *SI = dyn_cast<ShiftInst>(FirstInst))
- return new ShiftInst(SI->getOpcode(), LHSVal, RHSVal);
else {
assert(isa<GetElementPtrInst>(FirstInst));
return new GetElementPtrInst(LHSVal, RHSVal);
/// of the block that defines it. This means that it must be obvious the value
/// of the load is not changed from the point of the load to the end of the
/// block it is in.
+///
+/// Finally, it is safe, but not profitable, to sink a load targeting a
+/// non-address-taken alloca. Doing so will cause us to not promote the alloca
+/// to a register.
static bool isSafeToSinkLoad(LoadInst *L) {
BasicBlock::iterator BBI = L, E = L->getParent()->end();
for (++BBI; BBI != E; ++BBI)
if (BBI->mayWriteToMemory())
return false;
+
+ // Check for a non-address-taken alloca. If the alloca is only ever loaded
+ // from and stored to, sinking this load is legal but unprofitable: it would
+ // prevent the alloca from being promoted to a register.
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
+ bool isAddressTaken = false;
+ for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
+ UI != E; ++UI) {
+ // NOTE(review): this relies on isa<> accepting a use_iterator directly
+ // (via simplify_type), while the store check below dereferences with
+ // *UI -- confirm both forms are intended.
+ if (isa<LoadInst>(UI)) continue;
+ if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
+ // If storing TO the alloca, then the address isn't taken.
+ if (SI->getOperand(1) == AI) continue;
+ }
+ isAddressTaken = true;
+ break;
+ }
+
+ if (!isAddressTaken)
+ return false;
+ }
+
return true;
}
bool isVolatile = false;
if (isa<CastInst>(FirstInst)) {
CastSrcTy = FirstInst->getOperand(0)->getType();
- } else if (isa<BinaryOperator>(FirstInst) || isa<ShiftInst>(FirstInst) ||
- isa<CmpInst>(FirstInst)) {
+ } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
// Can fold binop, compare or shift here if the RHS is a constant,
// otherwise call FoldPHIArgBinOpIntoPHI.
ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
return CmpInst::create(CIOp->getOpcode(), CIOp->getPredicate(),
PhiVal, ConstantOp);
else
- return new ShiftInst(cast<ShiftInst>(FirstInst)->getOpcode(),
- PhiVal, ConstantOp);
+ assert(0 && "Unknown operation");
}
/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
// If LCSSA is around, don't mess with Phi nodes
- if (mustPreserveAnalysisID(LCSSAID)) return 0;
+ if (MustPreserveLCSSA) return 0;
if (Value *V = PN.hasConstantValue())
return ReplaceInstUsesWith(PN, V);
// is a getelementptr instruction, combine the indices of the two
// getelementptr instructions into a single instruction.
//
- std::vector<Value*> SrcGEPOperands;
+ SmallVector<Value*, 8> SrcGEPOperands;
if (User *Src = dyn_castGetElementPtr(PtrOp))
- SrcGEPOperands.assign(Src->op_begin(), Src->op_end());
+ SrcGEPOperands.append(Src->op_begin(), Src->op_end());
if (!SrcGEPOperands.empty()) {
// Note that if our source is a gep chain itself that we wait for that
cast<Instruction>(SrcGEPOperands[0])->getNumOperands() == 2)
return 0; // Wait until our source is folded to completion.
- std::vector<Value *> Indices;
+ SmallVector<Value*, 8> Indices;
// Find out whether the last index in the source GEP is a sequential idx.
bool EndsWithSequential = false;
}
if (!Indices.empty())
- return new GetElementPtrInst(SrcGEPOperands[0], Indices, GEP.getName());
+ return new GetElementPtrInst(SrcGEPOperands[0], &Indices[0],
+ Indices.size(), GEP.getName());
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(PtrOp)) {
// GEP of global variable. If all of the indices for this GEP are
// constants, we can promote this to a constexpr instead of an instruction.
// Scan for nonconstants...
- std::vector<Constant*> Indices;
+ SmallVector<Constant*, 8> Indices;
User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end();
for (; I != E && isa<Constant>(*I); ++I)
Indices.push_back(cast<Constant>(*I));
if (I == E) { // If they are all constants...
- Constant *CE = ConstantExpr::getGetElementPtr(GV, Indices);
+ Constant *CE = ConstantExpr::getGetElementPtr(GV,
+ &Indices[0],Indices.size());
// Replace all uses of the GEP with the new constexpr...
return ReplaceInstUsesWith(GEP, CE);
if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
const Type *SrcPTy = SrcTy->getElementType();
- if ((DestPTy->isInteger() && DestPTy != Type::Int1Ty) ||
- isa<PointerType>(DestPTy) || isa<PackedType>(DestPTy)) {
+ if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
+ isa<VectorType>(DestPTy)) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
// constants.
if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
if (Constant *CSrc = dyn_cast<Constant>(CastOp))
if (ASrcTy->getNumElements() != 0) {
- std::vector<Value*> Idxs(2, Constant::getNullValue(Type::Int32Ty));
- CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
+ Value *Idxs[2];
+ Idxs[0] = Idxs[1] = Constant::getNullValue(Type::Int32Ty);
+ CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
SrcTy = cast<PointerType>(CastOp->getType());
SrcPTy = SrcTy->getElementType();
}
- if (((SrcPTy->isInteger() && SrcPTy != Type::Int1Ty) ||
- isa<PointerType>(SrcPTy) || isa<PackedType>(SrcPTy)) &&
+ if ((SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
+ isa<VectorType>(SrcPTy)) &&
// Do not allow turning this into a load of an integer, which is then
// casted to a pointer, this pessimizes pointer analysis a lot.
(isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
- IC.getTargetData().getTypeSize(SrcPTy) ==
- IC.getTargetData().getTypeSize(DestPTy)) {
+ IC.getTargetData().getTypeSizeInBits(SrcPTy) ==
+ IC.getTargetData().getTypeSizeInBits(DestPTy)) {
// Okay, we are casting from one integer or pointer type to another of
// the same size. Instead of casting the pointer before the load, cast
// Instcombine load (constant global) into the value loaded.
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op))
- if (GV->isConstant() && !GV->isExternal())
+ if (GV->isConstant() && !GV->isDeclaration())
return ReplaceInstUsesWith(LI, GV->getInitializer());
// Instcombine load (constantexpr_GEP global, 0, ...) into the value loaded.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
- if (GV->isConstant() && !GV->isExternal())
+ if (GV->isConstant() && !GV->isDeclaration())
if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
return ReplaceInstUsesWith(LI, V);
if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
const Type *SrcPTy = SrcTy->getElementType();
- if ((DestPTy->isInteger() && DestPTy != Type::Int1Ty) ||
- isa<PointerType>(DestPTy)) {
+ if (DestPTy->isInteger() || isa<PointerType>(DestPTy)) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
// constants.
if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
if (Constant *CSrc = dyn_cast<Constant>(CastOp))
if (ASrcTy->getNumElements() != 0) {
- std::vector<Value*> Idxs(2, Constant::getNullValue(Type::Int32Ty));
- CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs);
+ Value* Idxs[2];
+ Idxs[0] = Idxs[1] = Constant::getNullValue(Type::Int32Ty);
+ CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
SrcTy = cast<PointerType>(CastOp->getType());
SrcPTy = SrcTy->getElementType();
}
- if (((SrcPTy->isInteger() && SrcPTy != Type::Int1Ty) ||
- isa<PointerType>(SrcPTy)) &&
- IC.getTargetData().getTypeSize(SrcPTy) ==
- IC.getTargetData().getTypeSize(DestPTy)) {
+ if ((SrcPTy->isInteger() || isa<PointerType>(SrcPTy)) &&
+ IC.getTargetData().getTypeSizeInBits(SrcPTy) ==
+ IC.getTargetData().getTypeSizeInBits(DestPTy)) {
// Okay, we are casting from one integer or pointer type to another of
// the same size. Instead of casting the pointer before
if (isa<PointerType>(CastDstTy)) {
if (CastSrcTy->isInteger())
opcode = Instruction::IntToPtr;
- } else if (const IntegerType* DITy = dyn_cast<IntegerType>(CastDstTy)) {
+ } else if (isa<IntegerType>(CastDstTy)) {
if (isa<PointerType>(SIOp0->getType()))
opcode = Instruction::PtrToInt;
- else if (const IntegerType* SITy = dyn_cast<IntegerType>(CastSrcTy))
- if (SITy->getBitWidth() != DITy->getBitWidth())
- return 0; // Don't do this transform on unequal bit widths.
- // else, BitCast is fine
}
if (Constant *C = dyn_cast<Constant>(SIOp0))
NewCast = ConstantExpr::getCast(opcode, C, CastDstTy);
if (!isa<UndefValue>(Val)) {
SI.setOperand(0, UndefValue::get(Val->getType()));
if (Instruction *U = dyn_cast<Instruction>(Val))
- WorkList.push_back(U); // Dropped a use.
+ AddToWorkList(U); // Dropped a use.
++NumCombined;
}
return 0; // Do not modify these!
if ((FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
FPred == FCmpInst::FCMP_OGE) && BI.getCondition()->hasOneUse()) {
FCmpInst *I = cast<FCmpInst>(BI.getCondition());
- std::string Name = I->getName(); I->setName("");
FCmpInst::Predicate NewPred = FCmpInst::getInversePredicate(FPred);
- Value *NewSCC = new FCmpInst(NewPred, X, Y, Name, I);
+ Instruction *NewSCC = new FCmpInst(NewPred, X, Y, "", I);
+ NewSCC->takeName(I);
// Swap Destinations and condition...
BI.setCondition(NewSCC);
BI.setSuccessor(0, FalseDest);
BI.setSuccessor(1, TrueDest);
- removeFromWorkList(I);
- I->getParent()->getInstList().erase(I);
- WorkList.push_back(cast<Instruction>(NewSCC));
+ RemoveFromWorkList(I);
+ I->eraseFromParent();
+ AddToWorkList(NewSCC);
return &BI;
}
IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
IPred == ICmpInst::ICMP_SGE) && BI.getCondition()->hasOneUse()) {
ICmpInst *I = cast<ICmpInst>(BI.getCondition());
- std::string Name = I->getName(); I->setName("");
ICmpInst::Predicate NewPred = ICmpInst::getInversePredicate(IPred);
- Value *NewSCC = new ICmpInst(NewPred, X, Y, Name, I);
+ Instruction *NewSCC = new ICmpInst(NewPred, X, Y, "", I);
+ NewSCC->takeName(I);
// Swap Destinations and condition...
BI.setCondition(NewSCC);
BI.setSuccessor(0, FalseDest);
BI.setSuccessor(1, TrueDest);
- removeFromWorkList(I);
- I->getParent()->getInstList().erase(I);
- WorkList.push_back(cast<Instruction>(NewSCC));
+ RemoveFromWorkList(I);
+ I->eraseFromParent();
+ AddToWorkList(NewSCC);
return &BI;
}
SI.setOperand(i,ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
AddRHS));
SI.setOperand(0, I->getOperand(0));
- WorkList.push_back(I);
+ AddToWorkList(I);
return &SI;
}
}
static bool CheapToScalarize(Value *V, bool isConstant) {
if (isa<ConstantAggregateZero>(V))
return true;
- if (ConstantPacked *C = dyn_cast<ConstantPacked>(V)) {
+ if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
if (isConstant) return true;
// If all elts are the same, we can extract.
Constant *Op0 = C->getOperand(0);
return false;
}
-/// getShuffleMask - Read and decode a shufflevector mask. It turns undef
-/// elements into values that are larger than the #elts in the input.
+/// Read and decode a shufflevector mask.
+///
+/// It turns undef elements into values that are larger than the number of
+/// elements in the input.
static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
unsigned NElts = SVI->getType()->getNumElements();
if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
return std::vector<unsigned>(NElts, 2*NElts);
std::vector<unsigned> Result;
- const ConstantPacked *CP = cast<ConstantPacked>(SVI->getOperand(2));
+ const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
if (isa<UndefValue>(CP->getOperand(i)))
Result.push_back(NElts*2); // undef -> 8
/// value is already around as a register, for example if it were inserted then
/// extracted from the vector.
static Value *FindScalarElement(Value *V, unsigned EltNo) {
- assert(isa<PackedType>(V->getType()) && "Not looking at a vector?");
- const PackedType *PTy = cast<PackedType>(V->getType());
+ assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
+ const VectorType *PTy = cast<VectorType>(V->getType());
unsigned Width = PTy->getNumElements();
if (EltNo >= Width) // Out of range access.
return UndefValue::get(PTy->getElementType());
return UndefValue::get(PTy->getElementType());
else if (isa<ConstantAggregateZero>(V))
return Constant::getNullValue(PTy->getElementType());
- else if (ConstantPacked *CP = dyn_cast<ConstantPacked>(V))
+ else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
return CP->getOperand(EltNo);
else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
// If this is an insert to a variable element, we don't know what it is.
if (isa<ConstantAggregateZero>(EI.getOperand(0)))
return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
- if (ConstantPacked *C = dyn_cast<ConstantPacked>(EI.getOperand(0))) {
+ if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
// If packed val is constant with uniform operands, replace EI
// with that operand
Constant *op0 = C->getOperand(0);
std::vector<Constant*> &Mask) {
assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
"Invalid CollectSingleShuffleElements");
- unsigned NumElts = cast<PackedType>(V->getType())->getNumElements();
+ unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
if (isa<UndefValue>(V)) {
Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
/// that computes V and the LHS value of the shuffle.
static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
Value *&RHS) {
- assert(isa<PackedType>(V->getType()) &&
+ assert(isa<VectorType>(V->getType()) &&
(RHS == 0 || V->getType() == RHS->getType()) &&
"Invalid shuffle!");
- unsigned NumElts = cast<PackedType>(V->getType())->getNumElements();
+ unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
if (isa<UndefValue>(V)) {
Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
}
Mask[InsertedIdx] = ConstantInt::get(Type::Int32Ty, ExtractedIdx);
return new ShuffleVectorInst(EI->getOperand(0), VecOp,
- ConstantPacked::get(Mask));
+ ConstantVector::get(Mask));
}
// If this insertelement isn't used by some other insertelement, turn it
Value *LHS = CollectShuffleElements(&IE, Mask, RHS);
if (RHS == 0) RHS = UndefValue::get(LHS->getType());
// We now have a shuffle of LHS, RHS, Mask.
- return new ShuffleVectorInst(LHS, RHS, ConstantPacked::get(Mask));
+ return new ShuffleVectorInst(LHS, RHS, ConstantVector::get(Mask));
}
}
}
else
Elts.push_back(ConstantInt::get(Type::Int32Ty, Mask[i]));
}
- SVI.setOperand(2, ConstantPacked::get(Elts));
+ SVI.setOperand(2, ConstantVector::get(Elts));
}
}
}
SVI.setOperand(0, SVI.getOperand(1));
SVI.setOperand(1, UndefValue::get(RHS->getType()));
- SVI.setOperand(2, ConstantPacked::get(Elts));
+ SVI.setOperand(2, ConstantVector::get(Elts));
LHS = SVI.getOperand(0);
RHS = SVI.getOperand(1);
MadeChange = true;
}
return new ShuffleVectorInst(LHSSVI->getOperand(0),
LHSSVI->getOperand(1),
- ConstantPacked::get(Elts));
+ ConstantVector::get(Elts));
}
}
}
-
+
return MadeChange ? &SVI : 0;
}
-void InstCombiner::removeFromWorkList(Instruction *I) {
- WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), I),
- WorkList.end());
-}
-
/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if it's
return true;
}
-/// OptimizeConstantExpr - Given a constant expression and target data layout
-/// information, symbolically evaluate the constant expr to something simpler
-/// if possible.
-static Constant *OptimizeConstantExpr(ConstantExpr *CE, const TargetData *TD) {
- if (!TD) return CE;
-
- Constant *Ptr = CE->getOperand(0);
- if (CE->getOpcode() == Instruction::GetElementPtr && Ptr->isNullValue() &&
- cast<PointerType>(Ptr->getType())->getElementType()->isSized()) {
- // If this is a constant expr gep that is effectively computing an
- // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
- bool isFoldableGEP = true;
- for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
- if (!isa<ConstantInt>(CE->getOperand(i)))
- isFoldableGEP = false;
- if (isFoldableGEP) {
- std::vector<Value*> Ops(CE->op_begin()+1, CE->op_end());
- uint64_t Offset = TD->getIndexedOffset(Ptr->getType(), Ops);
- Constant *C = ConstantInt::get(TD->getIntPtrType(), Offset);
- return ConstantExpr::getIntToPtr(C, CE->getType());
- }
- }
-
- return CE;
-}
-
/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
/// whose condition is a known constant, we only visit the reachable successors.
///
static void AddReachableCodeToWorklist(BasicBlock *BB,
- std::set<BasicBlock*> &Visited,
- std::vector<Instruction*> &WorkList,
+ SmallPtrSet<BasicBlock*, 64> &Visited,
+ InstCombiner &IC,
const TargetData *TD) {
// We have now visited this block! If we've already been here, bail out.
- if (!Visited.insert(BB).second) return;
+ if (!Visited.insert(BB)) return;
for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
Instruction *Inst = BBI++;
}
// ConstantProp instruction if trivially constant.
- if (Constant *C = ConstantFoldInstruction(Inst)) {
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = OptimizeConstantExpr(CE, TD);
+ if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
DOUT << "IC: ConstFold to: " << *C << " from: " << *Inst;
Inst->replaceAllUsesWith(C);
++NumConstProp;
continue;
}
- WorkList.push_back(Inst);
+ IC.AddToWorkList(Inst);
}
// Recursively visit successors. If this is a branch or switch on a constant,
if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
- AddReachableCodeToWorklist(BI->getSuccessor(!CondVal), Visited, WorkList,
- TD);
+ AddReachableCodeToWorklist(BI->getSuccessor(!CondVal), Visited, IC, TD);
return;
}
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
// See if this is an explicit destination.
for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
if (SI->getCaseValue(i) == Cond) {
- AddReachableCodeToWorklist(SI->getSuccessor(i), Visited, WorkList,TD);
+ AddReachableCodeToWorklist(SI->getSuccessor(i), Visited, IC, TD);
return;
}
// Otherwise it is the default destination.
- AddReachableCodeToWorklist(SI->getSuccessor(0), Visited, WorkList, TD);
+ AddReachableCodeToWorklist(SI->getSuccessor(0), Visited, IC, TD);
return;
}
}
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
- AddReachableCodeToWorklist(TI->getSuccessor(i), Visited, WorkList, TD);
+ AddReachableCodeToWorklist(TI->getSuccessor(i), Visited, IC, TD);
}
-bool InstCombiner::runOnFunction(Function &F) {
+bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
bool Changed = false;
TD = &getAnalysis<TargetData>();
+
+ DEBUG(DOUT << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
+ << F.getNameStr() << "\n");
{
// Do a depth-first traversal of the function, populate the worklist with
// the reachable instructions. Ignore blocks that are not reachable. Keep
// track of which blocks we visit.
- std::set<BasicBlock*> Visited;
- AddReachableCodeToWorklist(F.begin(), Visited, WorkList, TD);
+ SmallPtrSet<BasicBlock*, 64> Visited;
+ AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);
// Do a quick scan over the function. If we find any blocks that are
// unreachable, remove any instructions inside of them. This prevents
}
}
- while (!WorkList.empty()) {
- Instruction *I = WorkList.back(); // Get an instruction from the worklist
- WorkList.pop_back();
+ while (!Worklist.empty()) {
+ Instruction *I = RemoveOneFromWorkList();
+ if (I == 0) continue; // skip null values.
// Check to see if we can DCE the instruction.
if (isInstructionTriviallyDead(I)) {
DOUT << "IC: DCE: " << *I;
I->eraseFromParent();
- removeFromWorkList(I);
+ RemoveFromWorkList(I);
continue;
}
// Instruction isn't dead, see if we can constant propagate it.
- if (Constant *C = ConstantFoldInstruction(I)) {
- if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = OptimizeConstantExpr(CE, TD);
+ if (Constant *C = ConstantFoldInstruction(I, TD)) {
DOUT << "IC: ConstFold to: " << *C << " from: " << *I;
// Add operands to the worklist.
++NumConstProp;
I->eraseFromParent();
- removeFromWorkList(I);
+ RemoveFromWorkList(I);
continue;
}
I->replaceAllUsesWith(Result);
// Push the new instruction and any users onto the worklist.
- WorkList.push_back(Result);
+ AddToWorkList(Result);
AddUsersToWorkList(*Result);
- // Move the name to the new instruction first...
- std::string OldName = I->getName(); I->setName("");
- Result->setName(OldName);
+ // Move the name to the new instruction first.
+ Result->takeName(I);
// Insert the new instruction into the basic block...
BasicBlock *InstParent = I->getParent();
// Make sure that we reprocess all operands now that we reduced their
// use counts.
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- if (Instruction *OpI = dyn_cast<Instruction>(I->getOperand(i)))
- WorkList.push_back(OpI);
+ AddUsesToWorkList(*I);
// Instructions can end up on the worklist more than once. Make sure
// we do not process an instruction that has been deleted.
- removeFromWorkList(I);
+ RemoveFromWorkList(I);
// Erase the old instruction.
InstParent->getInstList().erase(I);
if (isInstructionTriviallyDead(I)) {
// Make sure we process all operands now that we are reducing their
// use counts.
- for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
- if (Instruction *OpI = dyn_cast<Instruction>(I->getOperand(i)))
- WorkList.push_back(OpI);
+ AddUsesToWorkList(*I);
// Instructions may end up in the worklist more than once. Erase all
// occurrences of this instruction.
- removeFromWorkList(I);
+ RemoveFromWorkList(I);
I->eraseFromParent();
} else {
- WorkList.push_back(Result);
- AddUsersToWorkList(*Result);
+ AddToWorkList(I);
+ AddUsersToWorkList(*I);
}
}
Changed = true;
}
}
+ assert(WorklistMap.empty() && "Worklist empty, but map not?");
return Changed;
}
+
+/// runOnFunction - Top-level driver: repeatedly run DoOneIteration over the
+/// function until an entire iteration makes no change.
+bool InstCombiner::runOnFunction(Function &F) {
+ // Cache whether LCSSA form must be preserved; visitPHINode consults this
+ // flag instead of re-querying the analysis for every PHI node.
+ MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
+
+ bool EverMadeChange = false;
+
+ // Iterate while there is work to do.
+ unsigned Iteration = 0;
+ while (DoOneIteration(F, Iteration++))
+ EverMadeChange = true;
+ return EverMadeChange;
+}
+
+/// createInstructionCombiningPass - Public factory for the instruction
+/// combining pass; constructs and returns a fresh InstCombiner.
FunctionPass *llvm::createInstructionCombiningPass() {
return new InstCombiner();
}