1 ; RUN: llc < %s -march=x86-64 -mcpu=bdver1 | FileCheck %s
2 ; Verify that for the architectures that are known to have poor latency
3 ; double precision shift instructions we generate alternative sequence
4 ; of instructions with lower latencies instead of shld instruction.
6 ;uint64_t lshift1(uint64_t a, uint64_t b)
8 ; return (a << 1) | (b >> 63);
12 ; CHECK: addq {{.*}},{{.*}}
13 ; CHECK-NEXT: shrq $63, {{.*}}
14 ; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
; lshift1 - double-precision left shift by 1: returns (a << 1) | (b >> 63).
; NOTE(review): the visible chunk used %shl without defining it and had no
; terminator/closing brace; restored from the C contract in the comment above.
define i64 @lshift1(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 1
  %shr = lshr i64 %b, 63
  %or = or i64 %shr, %shl
  ret i64 %or
}
25 ;uint64_t lshift2(uint64_t a, uint64_t b)
27 ; return (a << 2) | (b >> 62);
31 ; CHECK: shlq $2, {{.*}}
32 ; CHECK-NEXT: shrq $62, {{.*}}
33 ; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
; lshift2 - double-precision left shift by 2: returns (a << 2) | (b >> 62).
; NOTE(review): the visible chunk used %shl without defining it and had no
; terminator/closing brace; restored from the C contract in the comment above.
define i64 @lshift2(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 2
  %shr = lshr i64 %b, 62
  %or = or i64 %shr, %shl
  ret i64 %or
}
43 ;uint64_t lshift7(uint64_t a, uint64_t b)
45 ; return (a << 7) | (b >> 57);
49 ; CHECK: shlq $7, {{.*}}
50 ; CHECK-NEXT: shrq $57, {{.*}}
51 ; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
; lshift7 - double-precision left shift by 7: returns (a << 7) | (b >> 57).
; NOTE(review): the visible chunk used %shl without defining it and had no
; terminator/closing brace; restored from the C contract in the comment above.
define i64 @lshift7(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 7
  %shr = lshr i64 %b, 57
  %or = or i64 %shr, %shl
  ret i64 %or
}
61 ;uint64_t lshift63(uint64_t a, uint64_t b)
63 ; return (a << 63) | (b >> 1);
67 ; CHECK: shlq $63, {{.*}}
68 ; CHECK-NEXT: shrq {{.*}}
69 ; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
; lshift63 - double-precision left shift by 63: returns (a << 63) | (b >> 1).
; NOTE(review): the visible chunk used %shl and %shr without defining either
; and had no terminator/closing brace; restored from the C contract in the
; comment above (the preceding CHECK lines expect shlq $63 / shrq).
define i64 @lshift63(i64 %a, i64 %b) nounwind readnone uwtable {
entry:
  %shl = shl i64 %a, 63
  %shr = lshr i64 %b, 1
  %or = or i64 %shr, %shl
  ret i64 %or
}