From 541d0799474b0e800fa0df10b70cba11264b99f5 Mon Sep 17 00:00:00 2001
From: Silviu Baranga
Date: Mon, 27 Jul 2015 14:39:34 +0000
Subject: [PATCH] [ARM/AArch64] Fix cost model for interleaved accesses

Summary:
Fix the cost of interleaved accesses for ARM/AArch64. We were calling
getTypeAllocSize and using it to check the number of bits, when we should
have called getTypeAllocSizeInBits instead.

This would potentially cause the vectorizer to generate loads/stores and
shuffles which cannot be matched with an interleaved access instruction.

No performance changes are expected for now since matching/generating
interleaved accesses is still disabled by default.

Reviewers: rengolin

Subscribers: aemerson, llvm-commits, rengolin

Differential Revision: http://reviews.llvm.org/D11524

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@243270 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../AArch64/AArch64TargetTransformInfo.cpp    |  2 +-
 lib/Target/ARM/ARMTargetTransformInfo.cpp     |  2 +-
 .../LoopVectorize/AArch64/interleaved_cost.ll | 39 +++++++++++++++++++
 .../LoopVectorize/ARM/interleaved_cost.ll     | 39 +++++++++++++++++++
 4 files changed, 80 insertions(+), 2 deletions(-)
 create mode 100644 test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
 create mode 100644 test/Transforms/LoopVectorize/ARM/interleaved_cost.ll

diff --git a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index e085cca35f1..1ffc6d960e9 100644
--- a/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -416,7 +416,7 @@ unsigned AArch64TTIImpl::getInterleavedMemoryOpCost(
   if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
     unsigned NumElts = VecTy->getVectorNumElements();
     Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
-    unsigned SubVecSize = DL.getTypeAllocSize(SubVecTy);
+    unsigned SubVecSize = DL.getTypeAllocSizeInBits(SubVecTy);

     // ldN/stN only support legal vector types of size 64 or 128 in bits.
     if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 2f194cf7ae0..e3c64ee8a59 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -493,7 +493,7 @@ unsigned ARMTTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits) {
     unsigned NumElts = VecTy->getVectorNumElements();
     Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
-    unsigned SubVecSize = DL.getTypeAllocSize(SubVecTy);
+    unsigned SubVecSize = DL.getTypeAllocSizeInBits(SubVecTy);

     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
     if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
diff --git a/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll b/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
new file mode 100644
index 00000000000..218dc647635
--- /dev/null
+++ b/test/Transforms/LoopVectorize/AArch64/interleaved_cost.ll
@@ -0,0 +1,39 @@
+; RUN: opt -S -debug-only=loop-vectorize -loop-vectorize -instcombine -enable-interleaved-mem-accesses=true < %s |& FileCheck %s
+
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-gnueabi"
+
+@AB = common global [1024 x i8] zeroinitializer, align 4
+@CD = common global [1024 x i8] zeroinitializer, align 4
+
+define void @test_byte_interleaved_cost(i8 %C, i8 %D) {
+entry:
+  br label %for.body
+
+; 8xi8 and 16xi8 are valid i8 vector types, so the cost of the interleaved
+; access group is 2.
+
+; CHECK: LV: Found an estimated cost of 2 for VF 8 For instruction: %tmp = load i8, i8* %arrayidx0, align 4
+; CHECK: LV: Found an estimated cost of 2 for VF 16 For instruction: %tmp = load i8, i8* %arrayidx0, align 4
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @AB, i64 0, i64 %indvars.iv
+  %tmp = load i8, i8* %arrayidx0, align 4
+  %tmp1 = or i64 %indvars.iv, 1
+  %arrayidx1 = getelementptr inbounds [1024 x i8], [1024 x i8]* @AB, i64 0, i64 %tmp1
+  %tmp2 = load i8, i8* %arrayidx1, align 4
+  %add = add nsw i8 %tmp, %C
+  %mul = mul nsw i8 %tmp2, %D
+  %arrayidx2 = getelementptr inbounds [1024 x i8], [1024 x i8]* @CD, i64 0, i64 %indvars.iv
+  store i8 %add, i8* %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @CD, i64 0, i64 %tmp1
+  store i8 %mul, i8* %arrayidx3, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+  %cmp = icmp slt i64 %indvars.iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
diff --git a/test/Transforms/LoopVectorize/ARM/interleaved_cost.ll b/test/Transforms/LoopVectorize/ARM/interleaved_cost.ll
new file mode 100644
index 00000000000..9453894ecbc
--- /dev/null
+++ b/test/Transforms/LoopVectorize/ARM/interleaved_cost.ll
@@ -0,0 +1,39 @@
+; RUN: opt -S -debug-only=loop-vectorize -loop-vectorize -instcombine -enable-interleaved-mem-accesses=true < %s |& FileCheck %s
+
+
+target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
+target triple = "armv8--linux-gnueabihf"
+
+@AB = common global [1024 x i8] zeroinitializer, align 4
+@CD = common global [1024 x i8] zeroinitializer, align 4
+
+define void @test_byte_interleaved_cost(i8 %C, i8 %D) {
+entry:
+  br label %for.body
+
+; 8xi8 and 16xi8 are valid i8 vector types, so the cost of the interleaved
+; access group is 2.
+
+; CHECK: LV: Found an estimated cost of 2 for VF 8 For instruction: %tmp = load i8, i8* %arrayidx0, align 4
+; CHECK: LV: Found an estimated cost of 2 for VF 16 For instruction: %tmp = load i8, i8* %arrayidx0, align 4
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @AB, i64 0, i64 %indvars.iv
+  %tmp = load i8, i8* %arrayidx0, align 4
+  %tmp1 = or i64 %indvars.iv, 1
+  %arrayidx1 = getelementptr inbounds [1024 x i8], [1024 x i8]* @AB, i64 0, i64 %tmp1
+  %tmp2 = load i8, i8* %arrayidx1, align 4
+  %add = add nsw i8 %tmp, %C
+  %mul = mul nsw i8 %tmp2, %D
+  %arrayidx2 = getelementptr inbounds [1024 x i8], [1024 x i8]* @CD, i64 0, i64 %indvars.iv
+  store i8 %add, i8* %arrayidx2, align 4
+  %arrayidx3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @CD, i64 0, i64 %tmp1
+  store i8 %mul, i8* %arrayidx3, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
+  %cmp = icmp slt i64 %indvars.iv.next, 1024
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
--
2.34.1
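
Editor's note: the following standalone sketch is not part of the patch. It illustrates the byte/bit mismatch the change corrects, using only DataLayout calls that appear in the diff above. It is written against the LLVM 3.7-era C++ API (VectorType::get taking an element count), and the main() driver, variable names, and printed strings are illustrative assumptions, not LLVM code.

    // Sketch: why getTypeAllocSize (bytes) failed the 64/128-bit check that
    // getTypeAllocSizeInBits (bits) satisfies for legal ldN/stN sub-vectors.
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/raw_ostream.h"
    #include <cstdint>

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      // Data layout string taken from the AArch64 test above.
      DataLayout DL("e-m:e-i64:64-i128:128-n32:64-S128");

      // An <8 x i8> sub-vector, e.g. one member of a factor-2 interleaved
      // group over a <16 x i8> access (VF 16 in the tests).
      Type *SubVecTy = VectorType::get(Type::getInt8Ty(Ctx), 8);

      uint64_t SizeInBytes = DL.getTypeAllocSize(SubVecTy);       // 8 (bytes)
      uint64_t SizeInBits  = DL.getTypeAllocSizeInBits(SubVecTy); // 64 (bits)

      // The cost model compares against 64 and 128, which are the legal
      // ldN/stN (vldN/vstN) vector widths in bits, so the byte-sized query
      // never matched a legal sub-vector type.
      outs() << "old check (bytes == 64 || 128): "
             << ((SizeInBytes == 64 || SizeInBytes == 128) ? "true" : "false")
             << "\n"; // prints "false"
      outs() << "new check (bits  == 64 || 128): "
             << ((SizeInBits == 64 || SizeInBits == 128) ? "true" : "false")
             << "\n"; // prints "true"
      return 0;
    }

With the byte-sized query, legal 64- and 128-bit sub-vectors fell through to the generic (shuffle-based) cost, which is what the new tests pin down by checking for a cost of 2 at VF 8 and VF 16.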