P.Partitions.push_back(New);
}
- bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
+ bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset,
+ bool IsVolatile) {
uint64_t Size = TD.getTypeStoreSize(Ty);
// If this memory access can be shown to *statically* extend outside the
return true;
}
- insertUse(I, Offset, Size);
+ // We allow splitting of loads and stores where the type is an integer
+ // type which covers the entire alloca. Such integer loads and stores
+ // often require decomposition into fine-grained loads and stores.
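+ // For example (illustrative): given
+ //   %a = alloca { float, float }
+ //   %p = bitcast { float, float }* %a to i64*
+ //   %v = load i64* %p
+ // the i64 load covers the whole 8-byte alloca, so it is marked splittable
+ // and can later be recomposed from the two float-sized pieces.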
+ bool IsSplittable = false;
+ if (IntegerType *ITy = dyn_cast<IntegerType>(Ty))
+ IsSplittable = !IsVolatile && ITy->getBitWidth() == AllocSize*8;
+
+ insertUse(I, Offset, Size, IsSplittable);
return true;
}
bool visitLoadInst(LoadInst &LI) {
assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
"All simple FCA loads should have been pre-split");
- return handleLoadOrStore(LI.getType(), LI, Offset);
+ return handleLoadOrStore(LI.getType(), LI, Offset, LI.isVolatile());
}
bool visitStoreInst(StoreInst &SI) {
assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
"All simple FCA stores should have been pre-split");
- return handleLoadOrStore(ValOp->getType(), SI, Offset);
+ return handleLoadOrStore(ValOp->getType(), SI, Offset, SI.isVolatile());
}
UserTy = LI->getType();
} else if (StoreInst *SI = dyn_cast<StoreInst>(UI->U->getUser())) {
UserTy = SI->getValueOperand()->getType();
+ } else {
+ return 0; // Bail if we have weird uses.
+ }
+
+ if (IntegerType *ITy = dyn_cast<IntegerType>(UserTy)) {
+ // If the type is larger than the partition, skip it. We only encounter
+ // this for split integer operations where we want to use the type of the
+ // entity causing the split.
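+ // For example, a 4-byte partition produced by splitting an i64 access
+ // still records the i64 use; the i64 is too wide for the partition and is
+ // skipped here.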
+ if (ITy->getBitWidth() > (I->EndOffset - I->BeginOffset)*8)
+ continue;
+
+ // If we have found an integer type use covering the alloca, use it
+ // regardless of the other types seen, as integers are often used as a
+ // "bucket of bits" type for the whole allocation.
+ return ITy;
}
if (Ty && Ty != UserTy)
if (SizeInBits != TD.getTypeStoreSizeInBits(AllocaTy))
return false;
+ // We need to ensure that an integer type with the appropriate bitwidth can
+ // be converted to the alloca type, whatever that is. We don't want to force
+ // the alloca itself to have an integer type if there is a more suitable one.
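+ // For example, i64 <-> <2 x float> converts with a bitcast, so a
+ // <2 x float> alloca passes; an aggregate such as { float, float } cannot
+ // be bitcast, fails this check, and is handled by splitting instead (see
+ // the PR14059.2 test).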
+ Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits);
+ if (!canConvertValue(TD, AllocaTy, IntTy) ||
+ !canConvertValue(TD, IntTy, AllocaTy))
+ return false;
+
uint64_t Size = TD.getTypeStoreSize(AllocaTy);
// Check the uses to ensure they are (likely) promotable integer uses.
if (VecTy)
return rewriteVectorizedLoadInst(IRB, LI, OldOp);
+
+ uint64_t Size = EndOffset - BeginOffset;
+ if (Size < TD.getTypeStoreSize(LI.getType())) {
+ assert(!LI.isVolatile());
+ assert(LI.getType()->isIntegerTy() &&
+ "Only integer type loads and stores are split");
+ assert(LI.getType()->getIntegerBitWidth() ==
+ TD.getTypeStoreSizeInBits(LI.getType()) &&
+ "Non-byte-multiple bit width");
+ assert(LI.getType()->getIntegerBitWidth() ==
+ TD.getTypeSizeInBits(OldAI.getAllocatedType()) &&
+ "Only alloca-wide loads can be split and recomposed");
+ IntegerType *NarrowTy = Type::getIntNTy(LI.getContext(), Size * 8);
+ bool IsConvertible = (BeginOffset - NewAllocaBeginOffset == 0) &&
+ canConvertValue(TD, NewAllocaTy, NarrowTy);
+ Value *V;
+ // Move the insertion point just past the load so that we can refer to it.
+ IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
+ if (IsConvertible)
+ V = convertValue(TD, IRB,
+ IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".load")),
+ NarrowTy);
+ else
+ V = IRB.CreateAlignedLoad(
+ getAdjustedAllocaPtr(IRB, NarrowTy->getPointerTo()),
+ getPartitionTypeAlign(NarrowTy), getName(".load"));
+ // Create a placeholder value with the same type as LI to use as the
+ // basis for the new value. This allows us to replace the uses of LI with
+ // the computed value, and then replace the placeholder with LI, leaving
+ // LI only used for this computation.
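+ // For example (illustrative, little-endian shown): splitting out the low
+ // 32 bits of an i64 load builds roughly
+ //   %placeholder = load i64* undef
+ //   %ext = zext i32 %narrow to i64
+ //   %mask = and i64 %placeholder, -4294967296
+ //   %v = or i64 %mask, %ext
+ // after which uses of the original load flow through %v and the
+ // placeholder is swapped back to the original load.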
+ Value *Placeholder
+ = IRB.CreateLoad(UndefValue::get(LI.getType()->getPointerTo()));
+ V = insertInteger(TD, IRB, Placeholder, V, BeginOffset,
+ getName(".insert"));
+ LI.replaceAllUsesWith(V);
+ Placeholder->replaceAllUsesWith(&LI);
+ cast<Instruction>(Placeholder)->eraseFromParent();
+ if (Pass.DeadSplitInsts.insert(&LI))
+ Pass.DeadInsts.push_back(&LI);
+ DEBUG(dbgs() << " to: " << *V << "\n");
+ return IsConvertible;
+ }
+
if (IntTy && LI.getType()->isIntegerTy())
return rewriteIntegerLoad(IRB, LI);
if (VecTy)
return rewriteVectorizedStoreInst(IRB, SI, OldOp);
Type *ValueTy = SI.getValueOperand()->getType();
+
+ uint64_t Size = EndOffset - BeginOffset;
+ if (Size < TD.getTypeStoreSize(ValueTy)) {
+ assert(!SI.isVolatile());
+ assert(ValueTy->isIntegerTy() &&
+ "Only integer type loads and stores are split");
+ assert(ValueTy->getIntegerBitWidth() ==
+ TD.getTypeStoreSizeInBits(ValueTy) &&
+ "Non-byte-multiple bit width");
+ assert(ValueTy->getIntegerBitWidth() ==
+ TD.getTypeSizeInBits(OldAI.getAllocatedType()) &&
+ "Only alloca-wide stores can be split and recomposed");
+ IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
+ Value *V = extractInteger(TD, IRB, SI.getValueOperand(), NarrowTy,
+ BeginOffset, getName(".extract"));
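+ // For example (illustrative, little-endian): for a partition covering
+ // bytes 4-7 of an i64 value this extracts
+ //   %shift = lshr i64 %val, 32
+ //   %piece = trunc i64 %shift to i32
+ // and only %piece is written through to the new alloca.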
+ StoreInst *NewSI;
+ bool IsConvertible = (BeginOffset - NewAllocaBeginOffset == 0) &&
+ canConvertValue(TD, NarrowTy, NewAllocaTy);
+ if (IsConvertible)
+ NewSI = IRB.CreateAlignedStore(convertValue(TD, IRB, V, NewAllocaTy),
+ &NewAI, NewAI.getAlignment());
+ else
+ NewSI = IRB.CreateAlignedStore(
+ V, getAdjustedAllocaPtr(IRB, NarrowTy->getPointerTo()),
+ getPartitionTypeAlign(NarrowTy));
+ (void)NewSI;
+ if (Pass.DeadSplitInsts.insert(&SI))
+ Pass.DeadInsts.push_back(&SI);
+
+ DEBUG(dbgs() << " to: " << *NewSI << "\n");
+ return IsConvertible;
+ }
+
if (IntTy && ValueTy->isIntegerTy())
return rewriteIntegerStore(IRB, SI);
uint64_t Offset, uint64_t Size) {
if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
return stripAggregateTypeWrapping(TD, Ty);
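+ // For example, a partition of Size 4 at Offset 6 within an 8-byte type
+ // is rejected by the check below: only 2 bytes remain past the offset.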
+ if (Offset > TD.getTypeAllocSize(Ty) ||
+ (TD.getTypeAllocSize(Ty) - Offset) < Size)
+ return 0;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
// We can't partition pointers...
Instruction *I = DeadInsts.pop_back_val();
DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
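+ // Instructions made dead by splitting can still be referenced by the
+ // recomposition chains (the last insert chain reads the original wide
+ // load), so replace any remaining uses with undef before erasing.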
+ I->replaceAllUsesWith(UndefValue::get(I->getType()));
+
for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
if (Instruction *U = dyn_cast<Instruction>(*OI)) {
// Zero out the operand and see if it becomes trivially dead.
%ai = load i24* %aiptr
; CHECK-NOT: store
; CHECK-NOT: load
-; CHECK: %[[mask0:.*]] = and i24 undef, -256
-; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[mask0]], -65281
-; CHECK-NEXT: %[[mask2:.*]] = and i24 %[[mask1]], 65535
+; CHECK: %[[ext2:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift2:.*]] = shl i24 %[[ext2]], 16
+; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, 65535
+; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[shift2]]
+; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
+; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
+; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
+; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], -256
+; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[ext0]]
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
%b2 = load i8* %b2ptr
; CHECK-NOT: store
; CHECK-NOT: load
-; CHECK: %[[trunc0:.*]] = trunc i24 %[[mask2]] to i8
-; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[mask2]], 8
+; CHECK: %[[trunc0:.*]] = trunc i24 %[[insert0]] to i8
+; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
-; CHECK-NEXT: %[[shift2:.*]] = lshr i24 %[[mask2]], 16
+; CHECK-NEXT: %[[shift2:.*]] = lshr i24 %[[insert0]], 16
; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[shift2]] to i8
%bsum0 = add i8 %b0, %b1
ret void
}
+define i64 @PR14059.2({ float, float }* %phi) {
+; Check that SROA can split up alloca-wide integer loads and stores where the
+; underlying alloca has smaller components that are accessed independently. This
+; shows up particularly with ABI lowering patterns coming out of Clang that rely
+; on the specific register placement of a single large integer return value.
+; CHECK: @PR14059.2
+
+entry:
+ %retval = alloca { float, float }, align 4
+ ; CHECK-NOT: alloca
+
+ %0 = bitcast { float, float }* %retval to i64*
+ store i64 0, i64* %0
+ ; CHECK-NOT: store
+
+ %phi.realp = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+ %phi.real = load float* %phi.realp
+ %phi.imagp = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+ %phi.imag = load float* %phi.imagp
+ ; CHECK: %[[realp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+ ; CHECK-NEXT: %[[real:.*]] = load float* %[[realp]]
+ ; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+ ; CHECK-NEXT: %[[imag:.*]] = load float* %[[imagp]]
+
+ %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ store float %phi.real, float* %real
+ store float %phi.imag, float* %imag
+ ; CHECK-NEXT: %[[imag_convert:.*]] = bitcast float %[[imag]] to i32
+ ; CHECK-NEXT: %[[imag_ext:.*]] = zext i32 %[[imag_convert]] to i64
+ ; CHECK-NEXT: %[[imag_shift:.*]] = shl i64 %[[imag_ext]], 32
+ ; CHECK-NEXT: %[[imag_mask:.*]] = and i64 undef, 4294967295
+ ; CHECK-NEXT: %[[imag_insert:.*]] = or i64 %[[imag_mask]], %[[imag_shift]]
+ ; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
+ ; CHECK-NEXT: %[[real_ext:.*]] = zext i32 %[[real_convert]] to i64
+ ; CHECK-NEXT: %[[real_mask:.*]] = and i64 %[[imag_insert]], -4294967296
+ ; CHECK-NEXT: %[[real_insert:.*]] = or i64 %[[real_mask]], %[[real_ext]]
+
+ %1 = load i64* %0, align 1
+ ret i64 %1
+ ; CHECK-NEXT: ret i64 %[[real_insert]]
+}
+
define void @PR14105({ [16 x i8] }* %ptr) {
; Ensure that when rewriting the GEP index '-1' for this alloca we preserve its
; sign as negative. We use a volatile memcpy to ensure promotion never actually
%ai = load i24* %aiptr
; CHECK-NOT: store
; CHECK-NOT: load
-; CHECK: %[[mask0:.*]] = and i24 undef, 65535
-; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[mask0]], -65281
-; CHECK-NEXT: %[[mask2:.*]] = and i24 %[[mask1]], -256
+; CHECK: %[[ext2:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[mask2:.*]] = and i24 undef, -256
+; CHECK-NEXT: %[[insert2:.*]] = or i24 %[[mask2]], %[[ext2]]
+; CHECK-NEXT: %[[ext1:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift1:.*]] = shl i24 %[[ext1]], 8
+; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[insert2]], -65281
+; CHECK-NEXT: %[[insert1:.*]] = or i24 %[[mask1]], %[[shift1]]
+; CHECK-NEXT: %[[ext0:.*]] = zext i8 0 to i24
+; CHECK-NEXT: %[[shift0:.*]] = shl i24 %[[ext0]], 16
+; CHECK-NEXT: %[[mask0:.*]] = and i24 %[[insert1]], 65535
+; CHECK-NEXT: %[[insert0:.*]] = or i24 %[[mask0]], %[[shift0]]
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
%b2 = load i8* %b2ptr
; CHECK-NOT: store
; CHECK-NOT: load
-; CHECK: %[[shift0:.*]] = lshr i24 %[[mask2]], 16
+; CHECK: %[[shift0:.*]] = lshr i24 %[[insert0]], 16
; CHECK-NEXT: %[[trunc0:.*]] = trunc i24 %[[shift0]] to i8
-; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[mask2]], 8
+; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[insert0]], 8
; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
-; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[mask2]] to i8
+; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[insert0]] to i8
%bsum0 = add i8 %b0, %b1
%bsum1 = add i8 %bsum0, %b2
%a0i16ptr = bitcast i8* %a0ptr to i16*
store i16 1, i16* %a0i16ptr
-; CHECK: %[[mask:.*]] = and i56 undef, 1099511627775
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 1099511627776
+; CHECK: %[[mask0:.*]] = and i16 1, -16
%a1i4ptr = bitcast i8* %a1ptr to i4*
store i4 1, i4* %a1i4ptr
-; CHECK: %[[mask:.*]] = and i56 %[[or]], -16492674416641
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 1099511627776
+; CHECK-NEXT: %[[insert0:.*]] = or i16 %[[mask0]], 1
store i8 1, i8* %a2ptr
-; CHECK-NEXT: %[[mask:.*]] = and i56 %[[or]], -1095216660481
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 4294967296
+; CHECK-NEXT: %[[mask1:.*]] = and i40 undef, 4294967295
+; CHECK-NEXT: %[[insert1:.*]] = or i40 %[[mask1]], 4294967296
%a3i24ptr = bitcast i8* %a3ptr to i24*
store i24 1, i24* %a3i24ptr
-; CHECK-NEXT: %[[mask:.*]] = and i56 %[[or]], -4294967041
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 256
+; CHECK-NEXT: %[[mask2:.*]] = and i40 %[[insert1]], -4294967041
+; CHECK-NEXT: %[[insert2:.*]] = or i40 %[[mask2]], 256
%a2i40ptr = bitcast i8* %a2ptr to i40*
store i40 1, i40* %a2i40ptr
-; CHECK-NEXT: %[[mask:.*]] = and i56 %[[or]], -1099511627776
-; CHECK-NEXT: %[[or:.*]] = or i56 %[[mask]], 1
+; CHECK-NEXT: %[[ext3:.*]] = zext i40 1 to i56
+; CHECK-NEXT: %[[mask3:.*]] = and i56 undef, -1099511627776
+; CHECK-NEXT: %[[insert3:.*]] = or i56 %[[mask3]], %[[ext3]]
; CHECK-NOT: store
; CHECK-NOT: load
%ai = load i56* %aiptr
%ret = zext i56 %ai to i64
ret i64 %ret
-; CHECK: %[[ret:.*]] = zext i56 %[[or]] to i64
+; CHECK-NEXT: %[[ext4:.*]] = zext i16 %[[insert0]] to i56
+; CHECK-NEXT: %[[shift4:.*]] = shl i56 %[[ext4]], 40
+; CHECK-NEXT: %[[mask4:.*]] = and i56 %[[insert3]], 1099511627775
+; CHECK-NEXT: %[[insert4:.*]] = or i56 %[[mask4]], %[[shift4]]
+; CHECK-NEXT: %[[ret:.*]] = zext i56 %[[insert4]] to i64
; CHECK-NEXT: ret i64 %[[ret]]
}