-; RUN: llc -mtriple=i686-linux -enable-block-placement < %s | FileCheck %s
+; RUN: llc -mtriple=i686-linux -pre-RA-sched=source < %s | FileCheck %s
declare void @error(i32 %i, i32 %a, i32 %b)
define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
; Test a chain of ifs, where the block guarded by the if is error handling code
; that is not expected to run.
-; CHECK: test_ifchains:
+; CHECK-LABEL: test_ifchains:
; CHECK: %entry
+; CHECK-NOT: .align
; CHECK: %else1
+; CHECK-NOT: .align
; CHECK: %else2
+; CHECK-NOT: .align
; CHECK: %else3
+; CHECK-NOT: .align
; CHECK: %else4
+; CHECK-NOT: .align
; CHECK: %exit
; CHECK: %then1
; CHECK: %then2
define i32 @test_loop_cold_blocks(i32 %i, i32* %a) {
; Check that we sink cold loop blocks after the hot loop body.
-; CHECK: test_loop_cold_blocks:
+; CHECK-LABEL: test_loop_cold_blocks:
; CHECK: %entry
+; CHECK-NOT: .align
+; CHECK: %unlikely1
+; CHECK-NOT: .align
+; CHECK: %unlikely2
+; CHECK: .align
; CHECK: %body1
; CHECK: %body2
; CHECK: %body3
-; CHECK: %unlikely1
-; CHECK: %unlikely2
; CHECK: %exit
entry:
define i32 @test_loop_early_exits(i32 %i, i32* %a) {
; Check that we sink early exit blocks out of loop bodies.
-; CHECK: test_loop_early_exits:
+; CHECK-LABEL: test_loop_early_exits:
; CHECK: %entry
; CHECK: %body1
; CHECK: %body2
define i32 @test_loop_rotate(i32 %i, i32* %a) {
; Check that we rotate conditional exits from the loop to the bottom of the
; loop, eliminating unconditional branches to the top.
-; CHECK: test_loop_rotate:
+; CHECK-LABEL: test_loop_rotate:
; CHECK: %entry
; CHECK: %body1
; CHECK: %body0
ret i32 %base
}
+define i32 @test_no_loop_rotate(i32 %i, i32* %a) {
+; Check that we don't try to rotate a loop which is already laid out with
+; fallthrough opportunities into the top and out of the bottom.
+; CHECK-LABEL: test_no_loop_rotate:
+; CHECK: %entry
+; CHECK: %body0
+; CHECK: %body1
+; CHECK: %exit
+
+entry:
+ br label %body0
+
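+; body0 accumulates a[iv] into %sum and bails out of the loop early when the
+; sum hits 42; body1 advances the induction variable and exits normally.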
+body0:
+ %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
+ %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
+ %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %0 = load i32* %arrayidx
+ %sum = add nsw i32 %0, %base
+ %bailcond1 = icmp eq i32 %sum, 42
+ br i1 %bailcond1, label %exit, label %body1
+
+body1:
+ %next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %next, %i
+ br i1 %exitcond, label %exit, label %body0
+
+exit:
+ ret i32 %base
+}
+
define i32 @test_loop_align(i32 %i, i32* %a) {
; Check that we provide basic loop body alignment with the block placement
; pass.
-; CHECK: test_loop_align:
+; CHECK-LABEL: test_loop_align:
; CHECK: %entry
; CHECK: .align [[ALIGN:[0-9]+]],
; CHECK-NEXT: %body
define i32 @test_nested_loop_align(i32 %i, i32* %a, i32* %b) {
; Check that we provide nested loop body alignment.
-; CHECK: test_nested_loop_align:
+; CHECK-LABEL: test_nested_loop_align:
; CHECK: %entry
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %loop.body.1
; single-source GCC.
; CHECK: unnatural_cfg2
; CHECK: %entry
-; CHECK: %loop.header
; CHECK: %loop.body1
; CHECK: %loop.body2
; CHECK: %loop.body3
; CHECK: %loop.body4
; CHECK: %loop.inner2.begin
; The loop.inner2.end block is folded.
+; CHECK: %loop.header
; CHECK: %bail
entry:
br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1
entry.if.then_crit_edge:
- %.pre14 = load i8* undef, align 1, !tbaa !0
+ %.pre14 = load i8* undef, align 1
br label %if.then
lor.lhs.false:
if.then:
%0 = phi i8 [ %.pre14, %entry.if.then_crit_edge ], [ undef, %exit ]
%1 = and i8 %0, 1
- store i8 %1, i8* undef, align 4, !tbaa !0
+ store i8 %1, i8* undef, align 4
br label %if.end
if.end:
;
; CHECK: test_unnatural_cfg_backwards_inner_loop
; CHECK: %entry
-; CHECK: %body
-; CHECK: %loop1
+; CHECK: [[BODY:# BB#[0-9]+]]:
; CHECK: %loop2b
+; CHECK: %loop1
; CHECK: %loop2a
entry:
define void @unanalyzable_branch_to_best_succ(i1 %cond) {
; Ensure that we can handle unanalyzable branches where the destination block
-; gets selected as the optimal sucessor to merge.
+; gets selected as the optimal successor to merge.
;
; CHECK: unanalyzable_branch_to_best_succ
; CHECK: %entry
entry:
br label %0
- %val0 = volatile load float* undef
+ %val0 = load volatile float* undef
%cmp0 = fcmp une float %val0, undef
br i1 %cmp0, label %1, label %0
- %val1 = volatile load float* undef
+ %val1 = load volatile float* undef
%cmp1 = fcmp une float %val1, undef
br i1 %cmp1, label %2, label %1
- %val2 = volatile load float* undef
+ %val2 = load volatile float* undef
%cmp2 = fcmp une float %val2, undef
br i1 %cmp2, label %3, label %2
- %val3 = volatile load float* undef
+ %val3 = load volatile float* undef
%cmp3 = fcmp une float %val3, undef
br i1 %cmp3, label %4, label %3
- %val4 = volatile load float* undef
+ %val4 = load volatile float* undef
%cmp4 = fcmp une float %val4, undef
br i1 %cmp4, label %5, label %4
- %val5 = volatile load float* undef
+ %val5 = load volatile float* undef
%cmp5 = fcmp une float %val5, undef
br i1 %cmp5, label %6, label %5
- %val6 = volatile load float* undef
+ %val6 = load volatile float* undef
%cmp6 = fcmp une float %val6, undef
br i1 %cmp6, label %7, label %6
- %val7 = volatile load float* undef
+ %val7 = load volatile float* undef
%cmp7 = fcmp une float %val7, undef
br i1 %cmp7, label %8, label %7
- %val8 = volatile load float* undef
+ %val8 = load volatile float* undef
%cmp8 = fcmp une float %val8, undef
br i1 %cmp8, label %9, label %8
- %val9 = volatile load float* undef
+ %val9 = load volatile float* undef
%cmp9 = fcmp une float %val9, undef
br i1 %cmp9, label %10, label %9
- %val10 = volatile load float* undef
+ %val10 = load volatile float* undef
%cmp10 = fcmp une float %val10, undef
br i1 %cmp10, label %11, label %10
- %val11 = volatile load float* undef
+ %val11 = load volatile float* undef
%cmp11 = fcmp une float %val11, undef
br i1 %cmp11, label %12, label %11
- %val12 = volatile load float* undef
+ %val12 = load volatile float* undef
%cmp12 = fcmp une float %val12, undef
br i1 %cmp12, label %13, label %12
- %val13 = volatile load float* undef
+ %val13 = load volatile float* undef
%cmp13 = fcmp une float %val13, undef
br i1 %cmp13, label %14, label %13
- %val14 = volatile load float* undef
+ %val14 = load volatile float* undef
%cmp14 = fcmp une float %val14, undef
br i1 %cmp14, label %15, label %14
- %val15 = volatile load float* undef
+ %val15 = load volatile float* undef
%cmp15 = fcmp une float %val15, undef
br i1 %cmp15, label %16, label %15
- %val16 = volatile load float* undef
+ %val16 = load volatile float* undef
%cmp16 = fcmp une float %val16, undef
br i1 %cmp16, label %17, label %16
- %val17 = volatile load float* undef
+ %val17 = load volatile float* undef
%cmp17 = fcmp une float %val17, undef
br i1 %cmp17, label %18, label %17
- %val18 = volatile load float* undef
+ %val18 = load volatile float* undef
%cmp18 = fcmp une float %val18, undef
br i1 %cmp18, label %19, label %18
- %val19 = volatile load float* undef
+ %val19 = load volatile float* undef
%cmp19 = fcmp une float %val19, undef
br i1 %cmp19, label %20, label %19
- %val20 = volatile load float* undef
+ %val20 = load volatile float* undef
%cmp20 = fcmp une float %val20, undef
br i1 %cmp20, label %21, label %20
- %val21 = volatile load float* undef
+ %val21 = load volatile float* undef
%cmp21 = fcmp une float %val21, undef
br i1 %cmp21, label %22, label %21
- %val22 = volatile load float* undef
+ %val22 = load volatile float* undef
%cmp22 = fcmp une float %val22, undef
br i1 %cmp22, label %23, label %22
- %val23 = volatile load float* undef
+ %val23 = load volatile float* undef
%cmp23 = fcmp une float %val23, undef
br i1 %cmp23, label %24, label %23
- %val24 = volatile load float* undef
+ %val24 = load volatile float* undef
%cmp24 = fcmp une float %val24, undef
br i1 %cmp24, label %25, label %24
- %val25 = volatile load float* undef
+ %val25 = load volatile float* undef
%cmp25 = fcmp une float %val25, undef
br i1 %cmp25, label %26, label %25
- %val26 = volatile load float* undef
+ %val26 = load volatile float* undef
%cmp26 = fcmp une float %val26, undef
br i1 %cmp26, label %27, label %26
- %val27 = volatile load float* undef
+ %val27 = load volatile float* undef
%cmp27 = fcmp une float %val27, undef
br i1 %cmp27, label %28, label %27
- %val28 = volatile load float* undef
+ %val28 = load volatile float* undef
%cmp28 = fcmp une float %val28, undef
br i1 %cmp28, label %29, label %28
- %val29 = volatile load float* undef
+ %val29 = load volatile float* undef
%cmp29 = fcmp une float %val29, undef
br i1 %cmp29, label %30, label %29
- %val30 = volatile load float* undef
+ %val30 = load volatile float* undef
%cmp30 = fcmp une float %val30, undef
br i1 %cmp30, label %31, label %30
- %val31 = volatile load float* undef
+ %val31 = load volatile float* undef
%cmp31 = fcmp une float %val31, undef
br i1 %cmp31, label %32, label %31
- %val32 = volatile load float* undef
+ %val32 = load volatile float* undef
%cmp32 = fcmp une float %val32, undef
br i1 %cmp32, label %33, label %32
- %val33 = volatile load float* undef
+ %val33 = load volatile float* undef
%cmp33 = fcmp une float %val33, undef
br i1 %cmp33, label %34, label %33
- %val34 = volatile load float* undef
+ %val34 = load volatile float* undef
%cmp34 = fcmp une float %val34, undef
br i1 %cmp34, label %35, label %34
- %val35 = volatile load float* undef
+ %val35 = load volatile float* undef
%cmp35 = fcmp une float %val35, undef
br i1 %cmp35, label %36, label %35
- %val36 = volatile load float* undef
+ %val36 = load volatile float* undef
%cmp36 = fcmp une float %val36, undef
br i1 %cmp36, label %37, label %36
- %val37 = volatile load float* undef
+ %val37 = load volatile float* undef
%cmp37 = fcmp une float %val37, undef
br i1 %cmp37, label %38, label %37
- %val38 = volatile load float* undef
+ %val38 = load volatile float* undef
%cmp38 = fcmp une float %val38, undef
br i1 %cmp38, label %39, label %38
- %val39 = volatile load float* undef
+ %val39 = load volatile float* undef
%cmp39 = fcmp une float %val39, undef
br i1 %cmp39, label %40, label %39
- %val40 = volatile load float* undef
+ %val40 = load volatile float* undef
%cmp40 = fcmp une float %val40, undef
br i1 %cmp40, label %41, label %40
- %val41 = volatile load float* undef
+ %val41 = load volatile float* undef
%cmp41 = fcmp une float %val41, undef
br i1 %cmp41, label %42, label %41
- %val42 = volatile load float* undef
+ %val42 = load volatile float* undef
%cmp42 = fcmp une float %val42, undef
br i1 %cmp42, label %43, label %42
- %val43 = volatile load float* undef
+ %val43 = load volatile float* undef
%cmp43 = fcmp une float %val43, undef
br i1 %cmp43, label %44, label %43
- %val44 = volatile load float* undef
+ %val44 = load volatile float* undef
%cmp44 = fcmp une float %val44, undef
br i1 %cmp44, label %45, label %44
- %val45 = volatile load float* undef
+ %val45 = load volatile float* undef
%cmp45 = fcmp une float %val45, undef
br i1 %cmp45, label %46, label %45
- %val46 = volatile load float* undef
+ %val46 = load volatile float* undef
%cmp46 = fcmp une float %val46, undef
br i1 %cmp46, label %47, label %46
- %val47 = volatile load float* undef
+ %val47 = load volatile float* undef
%cmp47 = fcmp une float %val47, undef
br i1 %cmp47, label %48, label %47
- %val48 = volatile load float* undef
+ %val48 = load volatile float* undef
%cmp48 = fcmp une float %val48, undef
br i1 %cmp48, label %49, label %48
- %val49 = volatile load float* undef
+ %val49 = load volatile float* undef
%cmp49 = fcmp une float %val49, undef
br i1 %cmp49, label %50, label %49
- %val50 = volatile load float* undef
+ %val50 = load volatile float* undef
%cmp50 = fcmp une float %val50, undef
br i1 %cmp50, label %51, label %50
- %val51 = volatile load float* undef
+ %val51 = load volatile float* undef
%cmp51 = fcmp une float %val51, undef
br i1 %cmp51, label %52, label %51
- %val52 = volatile load float* undef
+ %val52 = load volatile float* undef
%cmp52 = fcmp une float %val52, undef
br i1 %cmp52, label %53, label %52
- %val53 = volatile load float* undef
+ %val53 = load volatile float* undef
%cmp53 = fcmp une float %val53, undef
br i1 %cmp53, label %54, label %53
- %val54 = volatile load float* undef
+ %val54 = load volatile float* undef
%cmp54 = fcmp une float %val54, undef
br i1 %cmp54, label %55, label %54
- %val55 = volatile load float* undef
+ %val55 = load volatile float* undef
%cmp55 = fcmp une float %val55, undef
br i1 %cmp55, label %56, label %55
- %val56 = volatile load float* undef
+ %val56 = load volatile float* undef
%cmp56 = fcmp une float %val56, undef
br i1 %cmp56, label %57, label %56
- %val57 = volatile load float* undef
+ %val57 = load volatile float* undef
%cmp57 = fcmp une float %val57, undef
br i1 %cmp57, label %58, label %57
- %val58 = volatile load float* undef
+ %val58 = load volatile float* undef
%cmp58 = fcmp une float %val58, undef
br i1 %cmp58, label %59, label %58
- %val59 = volatile load float* undef
+ %val59 = load volatile float* undef
%cmp59 = fcmp une float %val59, undef
br i1 %cmp59, label %60, label %59
- %val60 = volatile load float* undef
+ %val60 = load volatile float* undef
%cmp60 = fcmp une float %val60, undef
br i1 %cmp60, label %61, label %60
- %val61 = volatile load float* undef
+ %val61 = load volatile float* undef
%cmp61 = fcmp une float %val61, undef
br i1 %cmp61, label %62, label %61
- %val62 = volatile load float* undef
+ %val62 = load volatile float* undef
%cmp62 = fcmp une float %val62, undef
br i1 %cmp62, label %63, label %62
- %val63 = volatile load float* undef
+ %val63 = load volatile float* undef
%cmp63 = fcmp une float %val63, undef
br i1 %cmp63, label %64, label %63
- %val64 = volatile load float* undef
+ %val64 = load volatile float* undef
%cmp64 = fcmp une float %val64, undef
br i1 %cmp64, label %65, label %64
exit:
ret void
}
+
+define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
+; This test case comes from the heapsort benchmark and exemplifies several
+; important aspects of block placement in the presence of loops:
+; 1) Loop rotation needs to *ensure* that the desired exiting edge can be
+;    a fallthrough.
+; 2) The exiting edge from the loop which is rotated to be laid out at the
+;    bottom of the loop needs to exit into the nearest enclosing loop (to
+;    which there is an exit). Otherwise, we force that enclosing loop into
+;    strange layouts that are significantly less efficient, often making it
+;    discontiguous.
+;
+; CHECK: @benchmark_heapsort
+; CHECK: %entry
+; First rotated loop top.
+; CHECK: .align
+; CHECK: %while.end
+; CHECK: %for.cond
+; CHECK: %if.then
+; CHECK: %if.else
+; CHECK: %if.end10
+; Second rotated loop top.
+; CHECK: .align
+; CHECK: %if.then24
+; CHECK: %while.cond.outer
+; Third rotated loop top.
+; CHECK: .align
+; CHECK: %while.cond
+; CHECK: %while.body
+; CHECK: %land.lhs.true
+; CHECK: %if.then19
+; CHECK: %if.end20
+; CHECK: %if.then8
+; CHECK: ret
+
+entry:
+ %shr = ashr i32 %n, 1
+ %add = add nsw i32 %shr, 1
+ %arrayidx3 = getelementptr inbounds double* %ra, i64 1
+ br label %for.cond
+
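+; Outer heapsort loop: while the heap is still being built (%l.0 > 1) pull in
+; the next element; otherwise extract the maximum and shrink the heap (%ir.0).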
+for.cond:
+ %ir.0 = phi i32 [ %n, %entry ], [ %ir.1, %while.end ]
+ %l.0 = phi i32 [ %add, %entry ], [ %l.1, %while.end ]
+ %cmp = icmp sgt i32 %l.0, 1
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then:
+ %dec = add nsw i32 %l.0, -1
+ %idxprom = sext i32 %dec to i64
+ %arrayidx = getelementptr inbounds double* %ra, i64 %idxprom
+ %0 = load double* %arrayidx, align 8
+ br label %if.end10
+
+if.else:
+ %idxprom1 = sext i32 %ir.0 to i64
+ %arrayidx2 = getelementptr inbounds double* %ra, i64 %idxprom1
+ %1 = load double* %arrayidx2, align 8
+ %2 = load double* %arrayidx3, align 8
+ store double %2, double* %arrayidx2, align 8
+ %dec6 = add nsw i32 %ir.0, -1
+ %cmp7 = icmp eq i32 %dec6, 1
+ br i1 %cmp7, label %if.then8, label %if.end10
+
+if.then8:
+ store double %1, double* %arrayidx3, align 8
+ ret void
+
+if.end10:
+ %ir.1 = phi i32 [ %ir.0, %if.then ], [ %dec6, %if.else ]
+ %l.1 = phi i32 [ %dec, %if.then ], [ %l.0, %if.else ]
+ %rra.0 = phi double [ %0, %if.then ], [ %1, %if.else ]
+ %add31 = add nsw i32 %ir.1, 1
+ br label %while.cond.outer
+
+while.cond.outer:
+ %j.0.ph.in = phi i32 [ %l.1, %if.end10 ], [ %j.1, %if.then24 ]
+ %j.0.ph = shl i32 %j.0.ph.in, 1
+ br label %while.cond
+
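+; Inner sift-down loops: walk %rra.0 down the heap, moving the larger child up
+; at each step until the heap property is restored.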
+while.cond:
+ %j.0 = phi i32 [ %add31, %if.end20 ], [ %j.0.ph, %while.cond.outer ]
+ %cmp11 = icmp sgt i32 %j.0, %ir.1
+ br i1 %cmp11, label %while.end, label %while.body
+
+while.body:
+ %cmp12 = icmp slt i32 %j.0, %ir.1
+ br i1 %cmp12, label %land.lhs.true, label %if.end20
+
+land.lhs.true:
+ %idxprom13 = sext i32 %j.0 to i64
+ %arrayidx14 = getelementptr inbounds double* %ra, i64 %idxprom13
+ %3 = load double* %arrayidx14, align 8
+ %add15 = add nsw i32 %j.0, 1
+ %idxprom16 = sext i32 %add15 to i64
+ %arrayidx17 = getelementptr inbounds double* %ra, i64 %idxprom16
+ %4 = load double* %arrayidx17, align 8
+ %cmp18 = fcmp olt double %3, %4
+ br i1 %cmp18, label %if.then19, label %if.end20
+
+if.then19:
+ br label %if.end20
+
+if.end20:
+ %j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
+ %idxprom21 = sext i32 %j.1 to i64
+ %arrayidx22 = getelementptr inbounds double* %ra, i64 %idxprom21
+ %5 = load double* %arrayidx22, align 8
+ %cmp23 = fcmp olt double %rra.0, %5
+ br i1 %cmp23, label %if.then24, label %while.cond
+
+if.then24:
+ %idxprom27 = sext i32 %j.0.ph.in to i64
+ %arrayidx28 = getelementptr inbounds double* %ra, i64 %idxprom27
+ store double %5, double* %arrayidx28, align 8
+ br label %while.cond.outer
+
+while.end:
+ %idxprom33 = sext i32 %j.0.ph.in to i64
+ %arrayidx34 = getelementptr inbounds double* %ra, i64 %idxprom33
+ store double %rra.0, double* %arrayidx34, align 8
+ br label %for.cond
+}
+
+declare void @cold_function() cold
+
+define i32 @test_cold_calls(i32* %a) {
+; Test that edges to blocks post-dominated by cold calls are
+; marked as not expected to be taken. They should be laid out
+; at the bottom.
+; CHECK-LABEL: test_cold_calls:
+; CHECK: %entry
+; CHECK: %else
+; CHECK: %exit
+; CHECK: %then
+
+entry:
+ %gep1 = getelementptr i32* %a, i32 1
+ %val1 = load i32* %gep1
+ %cond1 = icmp ugt i32 %val1, 1
+ br i1 %cond1, label %then, label %else
+
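+; This block is post-dominated by the cold call, so it should be treated as
+; unlikely and placed after the rest of the function.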
+then:
+ call void @cold_function()
+ br label %exit
+
+else:
+ %gep2 = getelementptr i32* %a, i32 2
+ %val2 = load i32* %gep2
+ br label %exit
+
+exit:
+ %ret = phi i32 [ %val1, %then ], [ %val2, %else ]
+ ret i32 %ret
+}