Instruction *I = dyn_cast<Instruction>(V);
if (!I) return;
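+ // Bits above the width of V's type can never be set; trim them from the
+ // query mask so sub-64-bit types (e.g. ubyte) don't report bogus high bits.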
+ Mask &= V->getType()->getIntegralTypeMask();
+
switch (I->getOpcode()) {
case Instruction::And:
// If either the LHS or the RHS is zero, the result is zero.
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false; // Only analyze instructions.
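+ // Likewise, bits outside the type's width can never be demanded.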
+ DemandedMask &= V->getType()->getIntegralTypeMask();
+
uint64_t KnownZero2, KnownOne2;
switch (I->getOpcode()) {
default: break;
}
}
+ // fold (and (cast A), (cast B)) -> (cast (and A, B))
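+ // e.g. (old IR syntax) %X = cast ubyte %A to int; %Y = cast ubyte %B to int;
+ // and int %X, %Y becomes %T = and ubyte %A, %B; cast ubyte %T to int --
+ // one cast instead of two.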
+ if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
+ if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
+ if (Op0C->getOperand(0)->getType() == Op1C->getOperand(0)->getType() &&
+ Op0C->getOperand(0)->getType()->isIntegral()) {
+ Instruction *NewOp = BinaryOperator::createAnd(Op0C->getOperand(0),
+ Op1C->getOperand(0),
+ I.getName());
+ InsertNewInstBefore(NewOp, I);
+ return new CastInst(NewOp, I.getType());
+ }
+ }
+
return Changed ? &I : 0;
}
}
}
}
+
+ // fold (or (cast A), (cast B)) -> (cast (or A, B))
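+ // Same shape as the And fold above: or the narrow values, then cast once.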
+ if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
+ if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
+ if (Op0C->getOperand(0)->getType() == Op1C->getOperand(0)->getType() &&
+ Op0C->getOperand(0)->getType()->isIntegral()) {
+ Instruction *NewOp = BinaryOperator::createOr(Op0C->getOperand(0),
+ Op1C->getOperand(0),
+ I.getName());
+ InsertNewInstBefore(NewOp, I);
+ return new CastInst(NewOp, I.getType());
+ }
+ }
+
return Changed ? &I : 0;
}
if (Instruction *R = AssociativeOpt(I, FoldSetCCLogical(*this, RHS)))
return R;
+ // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
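+ // Same shape again: xor the narrow values first, then cast once.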
+ if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
+ if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
+ if (Op0C->getOperand(0)->getType() == Op1C->getOperand(0)->getType() &&
+ Op0C->getOperand(0)->getType()->isIntegral()) {
+ Instruction *NewOp = BinaryOperator::createXor(Op0C->getOperand(0),
+ Op1C->getOperand(0),
+ I.getName());
+ InsertNewInstBefore(NewOp, I);
+ return new CastInst(NewOp, I.getType());
+ }
+ }
+
return Changed ? &I : 0;
}
- // this case, C1 == C2 and C1 is 8, 16, or 32.
+ // this case, C1 == C2 and bitwidth(Op0) - C1 is 8, 16, or 32.
if (ShiftAmt1 == ShiftAmt2) {
const Type *SExtType = 0;
- switch (ShiftAmt1) {
+ switch (Op0->getType()->getPrimitiveSizeInBits() - ShiftAmt1) {
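+ // e.g. for a 32-bit Op0 with C1 == 24, 32-24 == 8 bits survive, so
+ // ((X << 24) >> 24) with a signed shr is a sign extension from sbyte.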
case 8 : SExtType = Type::SByteTy; break;
case 16: SExtType = Type::ShortTy; break;
case 32: SExtType = Type::IntTy; break;
default: break;
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
- // Turn lvx -> load if the pointer is known aligned.
+ case Intrinsic::x86_sse_loadu_ps:
+ case Intrinsic::x86_sse2_loadu_pd:
+ case Intrinsic::x86_sse2_loadu_dq:
+ // Turn PPC lvx -> load if the pointer is known aligned.
+ // Turn X86 loadups -> load if the pointer is known aligned.
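+ // Once the pointer is known 16-byte aligned, both forms behave exactly
+ // like an ordinary vector load.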
if (GetKnownAlignment(II->getOperand(1), TD) >= 16) {
Value *Ptr = InsertCastBefore(II->getOperand(1),
PointerType::get(II->getType()), CI);
return new LoadInst(Ptr);
}
break;
+ case Intrinsic::x86_sse_storeu_ps:
+ case Intrinsic::x86_sse2_storeu_pd:
+ case Intrinsic::x86_sse2_storeu_dq:
+ case Intrinsic::x86_sse2_storel_dq:
+ // Turn X86 storeu -> store if the pointer is known aligned.
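+ // Same reasoning as the loads above, storing operand 2 through operand 1.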
+ if (GetKnownAlignment(II->getOperand(1), TD) >= 16) {
+ const Type *OpPtrTy = PointerType::get(II->getOperand(2)->getType());
+ Value *Ptr = InsertCastBefore(II->getOperand(1), OpPtrTy, CI);
+ return new StoreInst(II->getOperand(2), Ptr);
+ }
+ break;
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
if (ConstantPacked *Mask = dyn_cast<ConstantPacked>(II->getOperand(3))) {
// Check to see if we are changing the return type...
if (OldRetTy != FT->getReturnType()) {
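+ // The transform is still safe if the new return type is a pointer and the
+ // old one was a pointer-sized integer, or if the result is never used.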
if (Callee->isExternal() &&
- !OldRetTy->isLosslesslyConvertibleTo(FT->getReturnType()) &&
- !Caller->use_empty())
+ !(OldRetTy->isLosslesslyConvertibleTo(FT->getReturnType()) ||
+ (isa<PointerType>(FT->getReturnType()) &&
+ TD->getIntPtrType()->isLosslesslyConvertibleTo(OldRetTy)))
+ && !Caller->use_empty())
return false; // Cannot transform this return value...
// If the callsite is an invoke instruction, and the return value is used by
Value *ScalarOp = IEI->getOperand(1);
Value *IdxOp = IEI->getOperand(2);
- if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
- if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
+ if (!isa<ConstantInt>(IdxOp))
+ return false;
+ unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getRawValue();
+
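+ // The scalar being inserted must be undef or an element extracted from
+ // LHS/RHS; anything else cannot be expressed as a shuffle of LHS and RHS.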
+ if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
+ // Okay, we can handle this if the vector we are inserting into is
+ // transitively ok.
+ if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
+ // If so, update the mask to reflect the inserted undef.
+ Mask[InsertedIdx] = UndefValue::get(Type::UIntTy);
+ return true;
+ }
+ } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
+ if (isa<ConstantInt>(EI->getOperand(1)) &&
EI->getOperand(0)->getType() == V->getType()) {
unsigned ExtractedIdx =
cast<ConstantInt>(EI->getOperand(1))->getRawValue();
- unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getRawValue();
// This must be extracting from either LHS or RHS.
if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {