From: Quentin Colombet Date: Sat, 26 Apr 2014 01:11:26 +0000 (+0000) Subject: [X86] Implement TargetLowering::getScalingFactorCost hook. X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=9e93e47b7f196893b5779090897210d750aa1c6b;p=oota-llvm.git [X86] Implement TargetLowering::getScalingFactorCost hook. Scaling factors are not free on X86 because every "complex" addressing mode breaks the related instruction into 2 allocations instead of 1. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207301 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index ddf1d2d73c8..7d195fcdae9 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -20851,3 +20851,16 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, return Res; } + +int X86TargetLowering::getScalingFactorCost(const AddrMode &AM, + Type *Ty) const { + // Scaling factors are not free at all. + // An indexed folded instruction, i.e., inst (reg1, reg2, scale), + // will take 2 allocations instead of 1 for plain addressing mode, + // i.e. inst (reg1). + if (isLegalAddressingMode(AM, Ty)) + // Scale represents reg2 * scale, thus account for 1 + // as soon as we use a second register. + return AM.Scale != 0; + return -1; +} diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 6eb0069d63b..a598d12cc0b 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -682,6 +682,12 @@ namespace llvm { /// the immediate into a register. bool isLegalAddImmediate(int64_t Imm) const override; + /// \brief Return the cost of the scaling factor used in the addressing + /// mode represented by AM for this target, for a load/store + /// of the specified type. + /// If the AM is supported, the return value must be >= 0. + /// If the AM is not supported, it returns a negative value. 
+ int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override; bool isVectorShiftByScalarCheap(Type *Ty) const override; diff --git a/test/CodeGen/X86/avoid_complex_am.ll b/test/CodeGen/X86/avoid_complex_am.ll new file mode 100644 index 00000000000..0b7a13d3c09 --- /dev/null +++ b/test/CodeGen/X86/avoid_complex_am.ll @@ -0,0 +1,37 @@ +; RUN: opt -S -loop-reduce < %s | FileCheck %s +; Complex addressing modes are costly. +; Make loop-reduce prefer unscaled accesses. +; +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx" + +define void @mulDouble(double* nocapture %a, double* nocapture %b, double* nocapture %c) { +; CHECK: @mulDouble +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry
; CHECK: [[IV:%[^ ]+]] = phi i64 [ [[IVNEXT:%[^,]+]], %for.body ], [ 0, %entry ] +; Only one induction variable should have been generated. +; CHECK-NOT: phi + %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ] + %tmp = add nsw i64 %indvars.iv, -1 + %arrayidx = getelementptr inbounds double* %b, i64 %tmp + %tmp1 = load double* %arrayidx, align 8 +; The induction variable should carry the scaling factor: 1 * 8 = 8. +; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 8 + %indvars.iv.next = add i64 %indvars.iv, 1 + %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next + %tmp2 = load double* %arrayidx2, align 8 + %mul = fmul double %tmp1, %tmp2 + %arrayidx4 = getelementptr inbounds double* %a, i64 %indvars.iv + store double %mul, double* %arrayidx4, align 8 + %lftr.wideiv = trunc i64 %indvars.iv.next to i32 +; Comparison should be 19 * 8 = 152. 
+; CHECK: icmp eq i32 {{%[^,]+}}, 152 + %exitcond = icmp eq i32 %lftr.wideiv, 20 + br i1 %exitcond, label %for.end, label %for.body + +for.end: ; preds = %for.body + ret void +} diff --git a/test/CodeGen/X86/masked-iv-safe.ll b/test/CodeGen/X86/masked-iv-safe.ll index 4a4d178f6e4..7f61e10f5f6 100644 --- a/test/CodeGen/X86/masked-iv-safe.ll +++ b/test/CodeGen/X86/masked-iv-safe.ll @@ -5,7 +5,7 @@ ; CHECK-LABEL: count_up ; CHECK-NOT: {{and|movz|sar|shl}} -; CHECK: inc +; CHECK: addq $8, ; CHECK-NOT: {{and|movz|sar|shl}} ; CHECK: jne define void @count_up(double* %d, i64 %n) nounwind { @@ -71,7 +71,7 @@ return: ; CHECK-LABEL: count_up_signed ; CHECK-NOT: {{and|movz|sar|shl}} -; CHECK: inc +; CHECK: addq $8, ; CHECK-NOT: {{and|movz|sar|shl}} ; CHECK: jne define void @count_up_signed(double* %d, i64 %n) nounwind { @@ -174,7 +174,7 @@ return: ; CHECK-LABEL: another_count_down ; CHECK-NOT: {{and|movz|sar|shl}} -; CHECK: decq +; CHECK: addq $-8, ; CHECK-NOT: {{and|movz|sar|shl}} ; CHECK: jne define void @another_count_down(double* %d, i64 %n) nounwind { @@ -242,7 +242,7 @@ return: ; CHECK-LABEL: another_count_down_signed ; CHECK-NOT: {{and|movz|sar|shl}} -; CHECK: decq +; CHECK: addq $-8, ; CHECK-NOT: {{and|movz|sar|shl}} ; CHECK: jne define void @another_count_down_signed(double* %d, i64 %n) nounwind {