From: Juergen Ributzka
Date: Mon, 4 Aug 2014 21:49:51 +0000 (+0000)
Subject: [FastISel][AArch64] Fix shift lowering for i8 and i16 value types.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=2c68cde70178838e0603e12978d64c57b9bfa1ab;p=oota-llvm.git

[FastISel][AArch64] Fix shift lowering for i8 and i16 value types.

This fix changes the #r and #s parameters that are passed to the UBFM/SBFM
instructions so that the zero-/sign-extension comes for free.

The original problem was that the shift-left lowering would use the 32-bit
shift even for i8/i16 value types, which could leave the upper bits set with
"garbage" values. The arithmetic shift right, on the other hand, would use
the wrong MSB as the sign bit when determining which bits to shift into the
value.

This fixes .

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214788 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index dbb9a050d52..aacc13ea8c6 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -2202,14 +2202,16 @@ unsigned AArch64FastISel::Emit_LSL_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
   switch (RetVT.SimpleTy) {
   default: return 0;
   case MVT::i8:
+    Opc = AArch64::UBFMWri; ImmR = -Shift % 32; ImmS =  7 - Shift; break;
   case MVT::i16:
+    Opc = AArch64::UBFMWri; ImmR = -Shift % 32; ImmS = 15 - Shift; break;
   case MVT::i32:
-    RetVT = MVT::i32;
     Opc = AArch64::UBFMWri; ImmR = -Shift % 32; ImmS = 31 - Shift; break;
   case MVT::i64:
     Opc = AArch64::UBFMXri; ImmR = -Shift % 64; ImmS = 63 - Shift; break;
   }
 
+  RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
   return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill,
                           ImmR, ImmS);
 }
@@ -2219,15 +2221,13 @@ unsigned AArch64FastISel::Emit_LSR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
   unsigned Opc, ImmS;
   switch (RetVT.SimpleTy) {
   default: return 0;
-  case MVT::i8:
-  case MVT::i16:
-  case MVT::i32:
-    RetVT = MVT::i32;
-    Opc = AArch64::UBFMWri; ImmS = 31; break;
-  case MVT::i64:
-    Opc = AArch64::UBFMXri; ImmS = 63; break;
+  case MVT::i8:  Opc = AArch64::UBFMWri; ImmS =  7; break;
+  case MVT::i16: Opc = AArch64::UBFMWri; ImmS = 15; break;
+  case MVT::i32: Opc = AArch64::UBFMWri; ImmS = 31; break;
+  case MVT::i64: Opc = AArch64::UBFMXri; ImmS = 63; break;
   }
 
+  RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
   return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill,
                           Shift, ImmS);
 }
@@ -2237,15 +2237,13 @@ unsigned AArch64FastISel::Emit_ASR_ri(MVT RetVT, unsigned Op0, bool Op0IsKill,
   unsigned Opc, ImmS;
   switch (RetVT.SimpleTy) {
   default: return 0;
-  case MVT::i8:
-  case MVT::i16:
-  case MVT::i32:
-    RetVT = MVT::i32;
-    Opc = AArch64::SBFMWri; ImmS = 31; break;
-  case MVT::i64:
-    Opc = AArch64::SBFMXri; ImmS = 63; break;
+  case MVT::i8:  Opc = AArch64::SBFMWri; ImmS =  7; break;
+  case MVT::i16: Opc = AArch64::SBFMWri; ImmS = 15; break;
+  case MVT::i32: Opc = AArch64::SBFMWri; ImmS = 31; break;
+  case MVT::i64: Opc = AArch64::SBFMXri; ImmS = 63; break;
   }
 
+  RetVT.SimpleTy = std::max(MVT::i32, RetVT.SimpleTy);
   return FastEmitInst_rii(Opc, TLI.getRegClassFor(RetVT), Op0, Op0IsKill,
                           Shift, ImmS);
 }
diff --git a/test/CodeGen/AArch64/fast-isel-shift.ll b/test/CodeGen/AArch64/fast-isel-shift.ll
index 35868ee384f..8f670b4ba20 100644
--- a/test/CodeGen/AArch64/fast-isel-shift.ll
+++ b/test/CodeGen/AArch64/fast-isel-shift.ll
@@ -1,14 +1,14 @@
 ; RUN: llc -fast-isel -fast-isel-abort -mtriple=arm64-apple-darwin < %s | FileCheck %s
 
 ; CHECK-LABEL: lsl_i8
-; CHECK: lsl {{w[0-9]*}}, {{w[0-9]*}}, #4
+; CHECK: ubfiz {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
 define zeroext i8 @lsl_i8(i8 %a) {
   %1 = shl i8 %a, 4
   ret i8 %1
 }
 
 ; CHECK-LABEL: lsl_i16
-; CHECK: lsl {{w[0-9]*}}, {{w[0-9]*}}, #8
+; CHECK: ubfiz {{w[0-9]*}}, {{w[0-9]*}}, #8, #8
 define zeroext i16 @lsl_i16(i16 %a) {
   %1 = shl i16 %a, 8
   ret i16 %1
@@ -30,14 +30,14 @@ define i64 @lsl_i64(i64 %a) {
 }
 
 ; CHECK-LABEL: lsr_i8
-; CHECK: lsr {{w[0-9]*}}, {{w[0-9]*}}, #4
+; CHECK: ubfx {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
 define zeroext i8 @lsr_i8(i8 %a) {
   %1 = lshr i8 %a, 4
   ret i8 %1
 }
 
 ; CHECK-LABEL: lsr_i16
-; CHECK: lsr {{w[0-9]*}}, {{w[0-9]*}}, #8
+; CHECK: ubfx {{w[0-9]*}}, {{w[0-9]*}}, #8, #8
 define zeroext i16 @lsr_i16(i16 %a) {
   %1 = lshr i16 %a, 8
   ret i16 %1
@@ -59,14 +59,14 @@ define i64 @lsr_i64(i64 %a) {
 }
 
 ; CHECK-LABEL: asr_i8
-; CHECK: asr {{w[0-9]*}}, {{w[0-9]*}}, #4
+; CHECK: sbfx {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
 define zeroext i8 @asr_i8(i8 %a) {
   %1 = ashr i8 %a, 4
   ret i8 %1
 }
 
 ; CHECK-LABEL: asr_i16
-; CHECK: asr {{w[0-9]*}}, {{w[0-9]*}}, #8
+; CHECK: sbfx {{w[0-9]*}}, {{w[0-9]*}}, #8, #8
 define zeroext i16 @asr_i16(i16 %a) {
   %1 = ashr i16 %a, 8
   ret i16 %1
@@ -87,3 +87,13 @@ define i64 @asr_i64(i64 %a) {
   ret i64 %1
 }
 
+; CHECK-LABEL: shift_test1
+; CHECK: ubfiz {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
+; CHECK-NEXT: sbfx {{w[0-9]*}}, {{w[0-9]*}}, #4, #4
+define i32 @shift_test1(i8 %a) {
+  %1 = shl i8 %a, 4
+  %2 = ashr i8 %1, 4
+  %3 = sext i8 %2 to i32
+  ret i32 %3
+}
+
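
To see why the new immediates make the extension free, the following standalone
C++ sketch (not part of the patch) models the 32-bit UBFM/SBFM semantics after
the ARM ARM DecodeBitMasks pseudocode, simplified to skip the immediate
validity checks. The helper names (ror32, ones, ubfm32, sbfm32) are invented
for illustration only.

#include <cassert>
#include <cstdint>
#include <cstdio>

static uint32_t ror32(uint32_t X, unsigned Shift) {
  Shift %= 32;
  return Shift ? (X >> Shift) | (X << (32 - Shift)) : X;
}

static uint32_t ones(unsigned N) { // N consecutive set bits, 0 < N <= 32
  return N >= 32 ? 0xFFFFFFFFu : (1u << N) - 1u;
}

// UBFM Wd, Wn, #ImmR, #ImmS: rotate the source right by ImmR, then keep only
// the field selected by the wmask/tmask pair; all other bits become zero.
static uint32_t ubfm32(uint32_t Wn, unsigned ImmR, unsigned ImmS) {
  uint32_t WMask = ror32(ones(ImmS + 1), ImmR);
  uint32_t TMask = ones((32 + ImmS - ImmR) % 32 + 1);
  return ror32(Wn, ImmR) & WMask & TMask;
}

// SBFM is the same, except bit ImmS of the source is replicated into the bits
// that UBFM would clear above the field -- the sign-extension.
static uint32_t sbfm32(uint32_t Wn, unsigned ImmR, unsigned ImmS) {
  uint32_t TMask = ones((32 + ImmS - ImmR) % 32 + 1);
  uint32_t Top = ((Wn >> ImmS) & 1) ? 0xFFFFFFFFu : 0;
  return (Top & ~TMask) | (ubfm32(Wn, ImmR, ImmS) & TMask);
}

int main() {
  uint32_t A = 0xAB; // an i8 value sitting in a W register

  // Old i8 'shl by 4' immediates (ImmR = -4 % 32 = 28, ImmS = 31 - 4): a
  // plain 32-bit LSL, leaving "garbage" in bits 8..11 of the result.
  assert(ubfm32(A, 28, 27) == 0xAB0);

  // New immediates (ImmS = 7 - 4): bits above the i8 result are cleared, so
  // the value is zero-extended for free (this is 'ubfiz w, w, #4, #4').
  assert(ubfm32(A, 28, 3) == ((A << 4) & 0xFF));

  // New i8 'ashr by 4' immediates (ImmR = 4, ImmS = 7): bit 7 -- the i8
  // MSB -- acts as the sign bit ('sbfx w, w, #4, #4'), not bit 31.
  assert(sbfm32(A, 4, 7) == (uint32_t)((int32_t)(int8_t)A >> 4));

  printf("bitfield-move immediates behave as expected\n");
  return 0;
}

Folding the extension into the bitfield move this way should also save the
separate uxtb/uxth or sxtb/sxth that FastISel would otherwise need to emit
before a zeroext/signext use, which is what the zeroext returns in the updated
tests rely on.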