if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
return SDValue();
}
+
+ // If the shift amount is larger than the input type then we're not
+ // accessing any of the loaded bytes. If the load was a zextload/extload
+ // then the result of the shift+trunc is zero/undef (handled elsewhere).
+ // If the load was a sextload then the result is a splat of the sign bit
+ // of the extended byte. This is not worth optimizing for.
+ if (ShAmt >= VT.getSizeInBits())
+ return SDValue();
+
}
}
; RUN: llc < %s -march=x86 | \
; RUN: grep {s\[ah\]\[rl\]l} | count 1
; test1: GEP index computed from a logical shift right.
; %P2 = &P[zext(X >> 2)] — codegen should fold the scaled index into
; addressing and emit exactly one shift (counted by the RUN/grep driver).
-define i32* @test1(i32* %P, i32 %X) {
+define i32* @test1(i32* %P, i32 %X) nounwind {
	%Y = lshr i32 %X, 2		; <i32> [#uses=1]
	%gep.upgrd.1 = zext i32 %Y to i64		; <i64> [#uses=1]
	%P2 = getelementptr i32* %P, i64 %gep.upgrd.1		; <i32*> [#uses=1]
	ret i32* %P2
}
; test2: same as test1 but with a left shift as the index source.
; %P2 = &P[zext(X << 2)] — the shl must survive (it scales by 4 before the
; implicit *4 element scaling), still only one shift expected in the output.
-define i32* @test2(i32* %P, i32 %X) {
+define i32* @test2(i32* %P, i32 %X) nounwind {
	%Y = shl i32 %X, 2		; <i32> [#uses=1]
	%gep.upgrd.2 = zext i32 %Y to i64		; <i64> [#uses=1]
	%P2 = getelementptr i32* %P, i64 %gep.upgrd.2		; <i32*> [#uses=1]
	ret i32* %P2
}
; test3: arithmetic-shift-right variant; GEP index used directly as i32.
-define i32* @test3(i32* %P, i32 %X) {
+define i32* @test3(i32* %P, i32 %X) nounwind {
	%Y = ashr i32 %X, 2		; <i32> [#uses=1]
	%P2 = getelementptr i32* %P, i32 %Y		; <i32*> [#uses=1]
	ret i32* %P2
; NOTE(review): the lines below look like a splice from a different hunk/file —
; %or and @g_16 are not defined in this function, and a second return follows
; the ret above. Likely a chunking artifact; verify against the full patch.
	store i32 %or, i32* @g_16
	ret void
}
+
+; rdar://8494845 + PR8244
+; Regression test for the DAGCombiner guard added in this patch: a shift
+; amount (8) equal to the truncated result width must not be narrowed into
+; the sign-extending load. Shifting the sext'd byte right by 8 yields a
+; splat of the sign bit, so the expected code keeps the movsbl + shrl pair.
+; X64: test10:
+; X64-NEXT: movsbl (%rdi), %eax
+; X64-NEXT: shrl $8, %eax
+; X64-NEXT: ret
+define i8 @test10(i8* %P) nounwind ssp {
+entry:
+  %tmp = load i8* %P, align 1          ; load one byte
+  %conv = sext i8 %tmp to i32          ; sign-extend to i32
+  %shr3 = lshr i32 %conv, 8            ; logical shift exposes bits 8..15 (sign-bit copies)
+  %conv2 = trunc i32 %shr3 to i8       ; result is 0x00 or 0xFF depending on sign of *P
+  ret i8 %conv2
+}