From f008b991205b9757498558e42fe6072e08307049 Mon Sep 17 00:00:00 2001 From: Andrea Di Biagio Date: Fri, 2 Oct 2015 12:45:37 +0000 Subject: [PATCH] [FastISel][x86] Teach how to select SSE2/AVX bitcasts between 128/256-bit vector types. This patch teaches FastISel the following two things: 1) On SSE2, no instructions are needed for bitcasts between 128-bit vector types; 2) On AVX, no instructions are needed for bitcasts between 256-bit vector types. Example: %1 = bitcast <4 x i32> %V to <2 x i64> Before (-fast-isel -fast-isel-abort=1): FastIsel miss: %1 = bitcast <4 x i32> %V to <2 x i64> Now we don't fall back to SelectionDAG and we correctly fold that computation propagating the register associated to %V. Differential Revision: http://reviews.llvm.org/D13347 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@249121 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86FastISel.cpp | 24 ++ test/CodeGen/X86/fast-isel-bitcasts-avx.ll | 244 ++++++++++++++++++++ test/CodeGen/X86/fast-isel-bitcasts.ll | 245 +++++++++++++++++++++ 3 files changed, 513 insertions(+) create mode 100644 test/CodeGen/X86/fast-isel-bitcasts-avx.ll create mode 100644 test/CodeGen/X86/fast-isel-bitcasts.ll diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index 5eabd7a756f..17704da91c2 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -3234,6 +3234,30 @@ X86FastISel::fastSelectInstruction(const Instruction *I) { updateValueMap(I, Reg); return true; } + case Instruction::BitCast: { + // Select SSE2/AVX bitcasts between 128/256 bit vector types. 
+ if (!Subtarget->hasSSE2()) + return false; + + EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType()); + EVT DstVT = TLI.getValueType(DL, I->getType()); + + if (!SrcVT.isSimple() || !DstVT.isSimple()) + return false; + + if (!SrcVT.is128BitVector() && + !(Subtarget->hasAVX() && SrcVT.is256BitVector())) + return false; + + unsigned Reg = getRegForValue(I->getOperand(0)); + if (Reg == 0) + return false; + + // No instruction is needed for conversion. Reuse the register used by + // the first operand. + updateValueMap(I, Reg); + return true; + } } return false; diff --git a/test/CodeGen/X86/fast-isel-bitcasts-avx.ll b/test/CodeGen/X86/fast-isel-bitcasts-avx.ll new file mode 100644 index 00000000000..03cefbc8682 --- /dev/null +++ b/test/CodeGen/X86/fast-isel-bitcasts-avx.ll @@ -0,0 +1,244 @@ +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -fast-isel-abort=1 -asm-verbose=0 | FileCheck %s +; +; Bitcasts between 256-bit vector types are no-ops since no instruction is +; needed for the conversion. 
+ +define <4 x i64> @v8i32_to_v4i64(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <4 x i64> @v16i16_to_v4i64(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <4 x i64> @v32i8_to_v4i64(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <4 x i64> @v4f64_to_v4i64(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <4 x i64> @v8f32_to_v4i64(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v4i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <4 x i64> + ret <4 x i64> %1 +} + +define <8 x i32> @v4i64_to_v8i32(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <8 x i32> @v16i16_to_v8i32(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <8 x i32> @v32i8_to_v8i32(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <8 x i32> @v4f64_to_v8i32(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <8 x i32> @v8f32_to_v8i32(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v8i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <8 x i32> + ret <8 x i32> %1 +} + +define <16 x i16> @v4i64_to_v16i16(<4 
x i64> %a) { +;CHECK-LABEL: v4i64_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <16 x i16> @v8i32_to_v16i16(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <16 x i16> @v32i8_to_v16i16(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <16 x i16> @v4f64_to_v16i16(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <16 x i16> @v8f32_to_v16i16(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v16i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <16 x i16> + ret <16 x i16> %1 +} + +define <32 x i8> @v16i16_to_v32i8(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <32 x i8> @v4i64_to_v32i8(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <32 x i8> @v8i32_to_v32i8(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <32 x i8> @v4f64_to_v32i8(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <32 x i8> @v8f32_to_v32i8(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v32i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <32 x i8> + ret <32 x i8> %1 +} + +define <8 x float> @v32i8_to_v8f32(<32 x i8> %a) { 
+;CHECK-LABEL: v32i8_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <8 x float> + ret <8 x float> %1 +} + +define <8 x float> @v16i16_to_v8f32(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <8 x float> + ret <8 x float> %1 +} + +define <8 x float> @v4i64_to_v8f32(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <8 x float> + ret <8 x float> %1 +} + +define <8 x float> @v8i32_to_v8f32(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <8 x float> + ret <8 x float> %1 +} + +define <8 x float> @v4f64_to_v8f32(<4 x double> %a) { +;CHECK-LABEL: v4f64_to_v8f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x double> %a to <8 x float> + ret <8 x float> %1 +} + +define <4 x double> @v8f32_to_v4f64(<8 x float> %a) { +;CHECK-LABEL: v8f32_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x float> %a to <4 x double> + ret <4 x double> %1 +} + +define <4 x double> @v32i8_to_v4f64(<32 x i8> %a) { +;CHECK-LABEL: v32i8_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <32 x i8> %a to <4 x double> + ret <4 x double> %1 +} + +define <4 x double> @v16i16_to_v4f64(<16 x i16> %a) { +;CHECK-LABEL: v16i16_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i16> %a to <4 x double> + ret <4 x double> %1 +} + +define <4 x double> @v4i64_to_v4f64(<4 x i64> %a) { +;CHECK-LABEL: v4i64_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i64> %a to <4 x double> + ret <4 x double> %1 +} + +define <4 x double> @v8i32_to_v4f64(<8 x i32> %a) { +;CHECK-LABEL: v8i32_to_v4f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i32> %a to <4 x double> + ret <4 x double> %1 +} diff --git 
a/test/CodeGen/X86/fast-isel-bitcasts.ll b/test/CodeGen/X86/fast-isel-bitcasts.ll new file mode 100644 index 00000000000..892b517fe87 --- /dev/null +++ b/test/CodeGen/X86/fast-isel-bitcasts.ll @@ -0,0 +1,245 @@ +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -fast-isel-abort=1 -asm-verbose=0 | FileCheck %s +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -fast-isel-abort=1 -asm-verbose=0 | FileCheck %s +; +; Bitcasts between 128-bit vector types are no-ops since no instruction is +; needed for the conversion. + +define <2 x i64> @v4i32_to_v2i64(<4 x i32> %a) { +;CHECK-LABEL: v4i32_to_v2i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i32> %a to <2 x i64> + ret <2 x i64> %1 +} + +define <2 x i64> @v8i16_to_v2i64(<8 x i16> %a) { +;CHECK-LABEL: v8i16_to_v2i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i16> %a to <2 x i64> + ret <2 x i64> %1 +} + +define <2 x i64> @v16i8_to_v2i64(<16 x i8> %a) { +;CHECK-LABEL: v16i8_to_v2i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i8> %a to <2 x i64> + ret <2 x i64> %1 +} + +define <2 x i64> @v2f64_to_v2i64(<2 x double> %a) { +;CHECK-LABEL: v2f64_to_v2i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x double> %a to <2 x i64> + ret <2 x i64> %1 +} + +define <2 x i64> @v4f32_to_v2i64(<4 x float> %a) { +;CHECK-LABEL: v4f32_to_v2i64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x float> %a to <2 x i64> + ret <2 x i64> %1 +} + +define <4 x i32> @v2i64_to_v4i32(<2 x i64> %a) { +;CHECK-LABEL: v2i64_to_v4i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x i64> %a to <4 x i32> + ret <4 x i32> %1 +} + +define <4 x i32> @v8i16_to_v4i32(<8 x i16> %a) { +;CHECK-LABEL: v8i16_to_v4i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i16> %a to <4 x i32> + ret <4 x i32> %1 +} + +define <4 x i32> @v16i8_to_v4i32(<16 x i8> %a) { +;CHECK-LABEL: 
v16i8_to_v4i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i8> %a to <4 x i32> + ret <4 x i32> %1 +} + +define <4 x i32> @v2f64_to_v4i32(<2 x double> %a) { +;CHECK-LABEL: v2f64_to_v4i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x double> %a to <4 x i32> + ret <4 x i32> %1 +} + +define <4 x i32> @v4f32_to_v4i32(<4 x float> %a) { +;CHECK-LABEL: v4f32_to_v4i32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x float> %a to <4 x i32> + ret <4 x i32> %1 +} + +define <8 x i16> @v2i64_to_v8i16(<2 x i64> %a) { +;CHECK-LABEL: v2i64_to_v8i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x i64> %a to <8 x i16> + ret <8 x i16> %1 +} + +define <8 x i16> @v4i32_to_v8i16(<4 x i32> %a) { +;CHECK-LABEL: v4i32_to_v8i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i32> %a to <8 x i16> + ret <8 x i16> %1 +} + +define <8 x i16> @v16i8_to_v8i16(<16 x i8> %a) { +;CHECK-LABEL: v16i8_to_v8i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i8> %a to <8 x i16> + ret <8 x i16> %1 +} + +define <8 x i16> @v2f64_to_v8i16(<2 x double> %a) { +;CHECK-LABEL: v2f64_to_v8i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x double> %a to <8 x i16> + ret <8 x i16> %1 +} + +define <8 x i16> @v4f32_to_v8i16(<4 x float> %a) { +;CHECK-LABEL: v4f32_to_v8i16: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x float> %a to <8 x i16> + ret <8 x i16> %1 +} + +define <16 x i8> @v8i16_to_v16i8(<8 x i16> %a) { +;CHECK-LABEL: v8i16_to_v16i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i16> %a to <16 x i8> + ret <16 x i8> %1 +} + +define <16 x i8> @v2i64_to_v16i8(<2 x i64> %a) { +;CHECK-LABEL: v2i64_to_v16i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x i64> %a to <16 x i8> + ret <16 x i8> %1 +} + +define <16 x i8> @v4i32_to_v16i8(<4 x i32> %a) { +;CHECK-LABEL: v4i32_to_v16i8: +;CHECK-NEXT: .cfi_startproc 
+;CHECK-NEXT: ret + %1 = bitcast <4 x i32> %a to <16 x i8> + ret <16 x i8> %1 +} + +define <16 x i8> @v2f64_to_v16i8(<2 x double> %a) { +;CHECK-LABEL: v2f64_to_v16i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x double> %a to <16 x i8> + ret <16 x i8> %1 +} + +define <16 x i8> @v4f32_to_v16i8(<4 x float> %a) { +;CHECK-LABEL: v4f32_to_v16i8: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x float> %a to <16 x i8> + ret <16 x i8> %1 +} + +define <4 x float> @v16i8_to_v4f32(<16 x i8> %a) { +;CHECK-LABEL: v16i8_to_v4f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i8> %a to <4 x float> + ret <4 x float> %1 +} + +define <4 x float> @v8i16_to_v4f32(<8 x i16> %a) { +;CHECK-LABEL: v8i16_to_v4f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i16> %a to <4 x float> + ret <4 x float> %1 +} + +define <4 x float> @v2i64_to_v4f32(<2 x i64> %a) { +;CHECK-LABEL: v2i64_to_v4f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x i64> %a to <4 x float> + ret <4 x float> %1 +} + +define <4 x float> @v4i32_to_v4f32(<4 x i32> %a) { +;CHECK-LABEL: v4i32_to_v4f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i32> %a to <4 x float> + ret <4 x float> %1 +} + +define <4 x float> @v2f64_to_v4f32(<2 x double> %a) { +;CHECK-LABEL: v2f64_to_v4f32: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x double> %a to <4 x float> + ret <4 x float> %1 +} + +define <2 x double> @v4f32_to_v2f64(<4 x float> %a) { +;CHECK-LABEL: v4f32_to_v2f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x float> %a to <2 x double> + ret <2 x double> %1 +} + +define <2 x double> @v16i8_to_v2f64(<16 x i8> %a) { +;CHECK-LABEL: v16i8_to_v2f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <16 x i8> %a to <2 x double> + ret <2 x double> %1 +} + +define <2 x double> @v8i16_to_v2f64(<8 x i16> %a) { +;CHECK-LABEL: v8i16_to_v2f64: +;CHECK-NEXT: 
.cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <8 x i16> %a to <2 x double> + ret <2 x double> %1 +} + +define <2 x double> @v2i64_to_v2f64(<2 x i64> %a) { +;CHECK-LABEL: v2i64_to_v2f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <2 x i64> %a to <2 x double> + ret <2 x double> %1 +} + +define <2 x double> @v4i32_to_v2f64(<4 x i32> %a) { +;CHECK-LABEL: v4i32_to_v2f64: +;CHECK-NEXT: .cfi_startproc +;CHECK-NEXT: ret + %1 = bitcast <4 x i32> %a to <2 x double> + ret <2 x double> %1 +} -- 2.34.1