diff --git a/test/CodeGen/X86/lsr-reuse-trunc.ll b/test/CodeGen/X86/lsr-reuse-trunc.ll
index 1f87089f80e..7f73b6b9d1e 100644
--- a/test/CodeGen/X86/lsr-reuse-trunc.ll
+++ b/test/CodeGen/X86/lsr-reuse-trunc.ll
@@ -1,5 +1,5 @@
-; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-win32 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux -mcpu=nehalem | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-win32 -mcpu=nehalem | FileCheck %s
 
 ; Full strength reduction wouldn't reduce register pressure, so LSR should
 ; stick with indexing here.
@@ -14,18 +14,18 @@
 
 define void @vvfloorf(float* nocapture %y, float* nocapture %x, i32* nocapture %n) nounwind {
 entry:
-  %0 = load i32* %n, align 4
+  %0 = load i32, i32* %n, align 4
   %1 = icmp sgt i32 %0, 0
   br i1 %1, label %bb, label %return
 
 bb:
   %indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
   %tmp = shl i64 %indvar, 2
-  %scevgep = getelementptr float* %y, i64 %tmp
+  %scevgep = getelementptr float, float* %y, i64 %tmp
   %scevgep9 = bitcast float* %scevgep to <4 x float>*
-  %scevgep10 = getelementptr float* %x, i64 %tmp
+  %scevgep10 = getelementptr float, float* %x, i64 %tmp
   %scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
-  %2 = load <4 x float>* %scevgep1011, align 16
+  %2 = load <4 x float>, <4 x float>* %scevgep1011, align 16
   %3 = bitcast <4 x float> %2 to <4 x i32>
   %4 = and <4 x i32> %3,
   %5 = bitcast <4 x i32> %4 to <4 x float>
@@ -48,7 +48,7 @@ bb:
   store <4 x float> %19, <4 x float>* %scevgep9, align 16
   %tmp12 = add i64 %tmp, 4
   %tmp13 = trunc i64 %tmp12 to i32
-  %20 = load i32* %n, align 4
+  %20 = load i32, i32* %n, align 4
   %21 = icmp sgt i32 %20, %tmp13
   %indvar.next = add i64 %indvar, 1
   br i1 %21, label %bb, label %return
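
This diff combines two independent updates to the test. The RUN lines gain -mcpu=nehalem, pinning the subtarget so the FileCheck assertions do not depend on llc's default CPU model. The IR changes switch load and getelementptr to LLVM's explicit-type syntax, in which the element type is spelled out as a separate first operand instead of being inferred from the pointer operand's type. A minimal sketch of that syntax migration, using hypothetical names (%p, %base, %idx) rather than lines from the test above:

  ; old typed-pointer forms (element type inferred from the pointer)
  %v = load i32* %p, align 4
  %g = getelementptr float* %base, i64 %idx

  ; new explicit-type forms (element type repeated before the pointer)
  %v2 = load i32, i32* %p, align 4
  %g2 = getelementptr float, float* %base, i64 %idx

Both spellings describe the same operation; the explicit form was introduced so the IR stays parseable once pointer types no longer carry a pointee type.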