X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=test%2FCodeGen%2FARM%2Ffast-isel.ll;h=5981cab7dcb15f862bae3c745ece7e75e48b29ab;hb=a6c0249619351b8cfa54a9e85eb3a3f1421bb315;hp=648d7118c2854e66a854c73de5d58677cf659370;hpb=8f644259dc519d3ac7159e4e662fcd6bcf50cbaf;p=oota-llvm.git

diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll
index 648d7118c28..5981cab7dcb 100644
--- a/test/CodeGen/ARM/fast-isel.ll
+++ b/test/CodeGen/ARM/fast-isel.ll
@@ -1,8 +1,9 @@
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM
-; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=THUMB
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
 ; Very basic fast-isel functionality.
-define i32 @add(i32 %a, i32 %b) nounwind {
+define i32 @test0(i32 %a, i32 %b) nounwind {
 entry:
   %a.addr = alloca i32, align 4
   %b.addr = alloca i32, align 4
@@ -26,16 +27,16 @@ br label %if.end
 if.end: ; preds = %if.then, %entry
   ret void
-; ARM: test1:
+; ARM-LABEL: test1:
 ; ARM: tst r0, #1
-; THUMB: test1:
+; THUMB-LABEL: test1:
 ; THUMB: tst.w r0, #1
 }
 ; Check some simple operations with immediates
 define void @test2(i32 %tmp, i32* %ptr) nounwind {
-; THUMB: test2:
-; ARM: test2:
+; THUMB-LABEL: test2:
+; ARM-LABEL: test2:
 b1:
   %a = add i32 %tmp, 4096
@@ -63,8 +64,8 @@ b3:
 }
 define void @test3(i32 %tmp, i32* %ptr1, i16* %ptr2, i8* %ptr3) nounwind {
-; THUMB: test3:
-; ARM: test3:
+; THUMB-LABEL: test3:
+; ARM-LABEL: test3:
 bb1:
   %a1 = trunc i32 %tmp to i16
@@ -80,12 +81,12 @@
 ; THUMB: and
 ; THUMB: strb
-; THUMB: uxtb
+; THUMB: and{{.*}}, #255
 ; THUMB: strh
 ; THUMB: uxth
 ; ARM: and
 ; ARM: strb
-; ARM: uxtb
+; ARM: and{{.*}}, #255
 ; ARM: strh
 ; ARM: uxth
@@ -121,13 +122,13 @@ bb3:
 ; THUMB: ldrb
 ; THUMB: ldrh
-; THUMB: uxtb
+; THUMB: and{{.*}}, #255
 ; THUMB: sxth
 ; THUMB: add
 ; THUMB: sub
 ; ARM: ldrb
 ; ARM: ldrh
-; ARM: uxtb
+; ARM: and{{.*}}, #255
 ; ARM: sxth
 ; ARM: add
 ; ARM: sub
@@ -142,80 +143,42 @@ define void @test4() {
   store i32 %b, i32* @test4g
   ret void
-; THUMB: ldr.n r0, LCPI4_1
-; THUMB: ldr r0, [r0]
-; THUMB: ldr r0, [r0]
-; THUMB: adds r0, #1
-; THUMB: ldr.n r1, LCPI4_0
-; THUMB: ldr r1, [r1]
-; THUMB: str r0, [r1]
-
-; ARM: ldr r0, LCPI4_1
-; ARM: ldr r0, [r0]
-; ARM: ldr r0, [r0]
-; ARM: add r0, r0, #1
-; ARM: ldr r1, LCPI4_0
-; ARM: ldr r1, [r1]
-; ARM: str r0, [r1]
-}
-
-; Check unaligned stores
-%struct.anon = type <{ float }>
-
-@a = common global %struct.anon* null, align 4
-define void @unaligned_store(float %x, float %y) nounwind {
-entry:
-; ARM: @unaligned_store
-; ARM: vmov r1, s0
-; ARM: str r1, [r0]
+; Note that relocations are either movw/movt or constant pool
+; loads. Different platforms will select different approaches.
-; THUMB: @unaligned_store
-; THUMB: vmov r1, s0
+; THUMB: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr.n r0, .LCPI)}}
+; THUMB: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
+; THUMB: ldr r0, [r0]
+; THUMB: ldr r1, [r0]
+; THUMB: adds r1, #1
 ; THUMB: str r1, [r0]
-  %add = fadd float %x, %y
-  %0 = load %struct.anon** @a, align 4
-  %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
-  store float %add, float* %x1, align 1
-  ret void
+; ARM: {{(movw r0, :lower16:L_test4g\$non_lazy_ptr)|(ldr r0, .LCPI)}}
+; ARM: {{(movt r0, :upper16:L_test4g\$non_lazy_ptr)?}}
+; ARM: ldr r0, [r0]
+; ARM: ldr r1, [r0]
+; ARM: add r1, r1, #1
+; ARM: str r1, [r0]
 }
-; Doublewords require only word-alignment.
-; rdar://10528060
-%struct.anon.0 = type { double }
-
-@foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4
-
-define void @test5(double %a, double %b) nounwind {
-entry:
-; ARM: @test5
-; THUMB: @test5
-  %add = fadd double %a, %b
-  store double %add, double* getelementptr inbounds (%struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4
-; ARM: vstr d16, [r0]
-; THUMB: vstr d16, [r0]
-  ret void
+; ARM: @urem_fold
+; THUMB: @urem_fold
+; ARM: and r0, r0, #31
+; THUMB: and r0, r0, #31
+define i32 @urem_fold(i32 %a) nounwind {
+  %rem = urem i32 %a, 32
+  ret i32 %rem
 }
-; Check unaligned loads of floats
-%class.TAlignTest = type <{ i16, float }>
-
-define zeroext i1 @test6(%class.TAlignTest* %this) nounwind align 2 {
+define i32 @trap_intrinsic() noreturn nounwind {
 entry:
-; ARM: @test6
-; THUMB: @test6
-  %0 = alloca %class.TAlignTest*, align 4
-  store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
-  %1 = load %class.TAlignTest** %0
-  %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1
-  %3 = load float* %2, align 1
-  %4 = fcmp une float %3, 0.000000e+00
-; ARM: ldr r0, [r0, #2]
-; ARM: vmov s0, r0
-; ARM: vcmpe.f32 s0, #0
-; THUMB: ldr.w r0, [r0, #2]
-; THUMB: vmov s0, r0
-; THUMB: vcmpe.f32 s0, #0
-  ret i1 %4
+; ARM: @trap_intrinsic
+; THUMB: @trap_intrinsic
+; ARM: trap
+; THUMB: trap
+  tail call void @llvm.trap( )
+  unreachable
 }
+
+declare void @llvm.trap() nounwind
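
For reference, the updated RUN lines above correspond to pipelines like the sketch below. This is only an illustration, assuming llc and FileCheck from an LLVM build are on PATH and the command is run from the top of the source tree; the flags are copied verbatim from the RUN lines, with the explicit file path standing in for %s.

  llc < test/CodeGen/ARM/fast-isel.ll -O0 -fast-isel-abort \
      -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios \
      -verify-machineinstrs \
    | FileCheck test/CodeGen/ARM/fast-isel.ll --check-prefix=ARM

The second RUN line repeats this with -mtriple=armv7-linux-gnueabi under the same ARM prefix, and the third uses -mtriple=thumbv7-apple-ios with --check-prefix=THUMB. Checking two different triples against one ARM prefix is why the new test4 checks accept either the movw/movt or the constant-pool relocation sequence, as the comment added in the test notes.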