1 ; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
2 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
3 ; RUN: -check-prefix=M2
4 ; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
5 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
6 ; RUN: -check-prefix=32R1-R5
7 ; RUN: llc < %s -march=mips -mcpu=mips32r2 | FileCheck %s \
8 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
9 ; RUN: -check-prefix=32R1-R5
10 ; RUN: llc < %s -march=mips -mcpu=mips32r3 | FileCheck %s \
11 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
12 ; RUN: -check-prefix=32R1-R5
13 ; RUN: llc < %s -march=mips -mcpu=mips32r5 | FileCheck %s \
14 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
15 ; RUN: -check-prefix=32R1-R5
16 ; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
17 ; RUN: -check-prefix=ALL -check-prefix=GP32 \
18 ; RUN: -check-prefix=32R6
19 ; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
20 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
21 ; RUN: -check-prefix=M3
22 ; RUN: llc < %s -march=mips64 -mcpu=mips4 | FileCheck %s \
23 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
24 ; RUN: -check-prefix=GP64-NOT-R6
25 ; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
26 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
27 ; RUN: -check-prefix=GP64-NOT-R6
28 ; RUN: llc < %s -march=mips64 -mcpu=mips64r2 | FileCheck %s \
29 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
30 ; RUN: -check-prefix=GP64-NOT-R6
31 ; RUN: llc < %s -march=mips64 -mcpu=mips64r3 | FileCheck %s \
32 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
33 ; RUN: -check-prefix=GP64-NOT-R6
34 ; RUN: llc < %s -march=mips64 -mcpu=mips64r5 | FileCheck %s \
35 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
36 ; RUN: -check-prefix=GP64-NOT-R6
37 ; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
38 ; RUN: -check-prefix=ALL -check-prefix=GP64 \
39 ; RUN: -check-prefix=64R6
41 define signext i1 @ashr_i1(i1 signext %a, i1 signext %b) {
51 define signext i8 @ashr_i8(i8 signext %a, i8 signext %b) {
55 ; FIXME: The andi instruction is redundant.
56 ; ALL: andi $[[T0:[0-9]+]], $5, 255
57 ; ALL: srav $2, $4, $[[T0]]
63 define signext i16 @ashr_i16(i16 signext %a, i16 signext %b) {
65 ; ALL-LABEL: ashr_i16:
67 ; FIXME: The andi instruction is redundant.
68 ; ALL: andi $[[T0:[0-9]+]], $5, 65535
69 ; ALL: srav $2, $4, $[[T0]]
75 define signext i32 @ashr_i32(i32 signext %a, i32 signext %b) {
77 ; ALL-LABEL: ashr_i32:
79 ; ALL: srav $2, $4, $5
85 define signext i64 @ashr_i64(i64 signext %a, i64 signext %b) {
87 ; ALL-LABEL: ashr_i64:
89 ; M2: srav $[[T0:[0-9]+]], $4, $7
90 ; M2: andi $[[T1:[0-9]+]], $7, 32
91 ; M2: bnez $[[T1]], $[[BB0:BB[0-9_]+]]
92 ; M2: move $3, $[[T0]]
93 ; M2: srlv $[[T2:[0-9]+]], $5, $7
94 ; M2: not $[[T3:[0-9]+]], $7
95 ; M2: sll $[[T4:[0-9]+]], $4, 1
96 ; M2: sllv $[[T5:[0-9]+]], $[[T4]], $[[T3]]
97 ; M2: or $3, $[[T5]], $[[T2]]
99 ; M2: beqz $[[T1]], $[[BB1:BB[0-9_]+]]
106 ; 32R1-R5: srlv $[[T0:[0-9]+]], $5, $7
107 ; 32R1-R5: not $[[T1:[0-9]+]], $7
108 ; 32R1-R5: sll $[[T2:[0-9]+]], $4, 1
109 ; 32R1-R5: sllv $[[T3:[0-9]+]], $[[T2]], $[[T1]]
110 ; 32R1-R5: or $3, $[[T3]], $[[T0]]
111 ; 32R1-R5: srav $[[T4:[0-9]+]], $4, $7
112 ; 32R1-R5: andi $[[T5:[0-9]+]], $7, 32
113 ; 32R1-R5: movn $3, $[[T4]], $[[T5]]
114 ; 32R1-R5: sra $4, $4, 31
116 ; 32R1-R5: movn $2, $4, $[[T5]]
118 ; 32R6: srav $[[T0:[0-9]+]], $4, $7
119 ; 32R6: andi $[[T1:[0-9]+]], $7, 32
120 ; 32R6: seleqz $[[T2:[0-9]+]], $[[T0]], $[[T1]]
121 ; 32R6: sra $[[T3:[0-9]+]], $4, 31
122 ; 32R6: selnez $[[T4:[0-9]+]], $[[T3]], $[[T1]]
123 ; 32R6: or $[[T5:[0-9]+]], $[[T4]], $[[T2]]
124 ; 32R6: srlv $[[T6:[0-9]+]], $5, $7
125 ; 32R6: not $[[T7:[0-9]+]], $7
126 ; 32R6: sll $[[T8:[0-9]+]], $4, 1
127 ; 32R6: sllv $[[T9:[0-9]+]], $[[T8]], $[[T7]]
128 ; 32R6: or $[[T10:[0-9]+]], $[[T9]], $[[T6]]
129 ; 32R6: seleqz $[[T11:[0-9]+]], $[[T10]], $[[T1]]
130 ; 32R6: selnez $[[T12:[0-9]+]], $[[T0]], $[[T1]]
132 ; 32R6: or $3, $[[T12]], $[[T11]]
134 ; GP64: dsrav $2, $4, $5
140 define signext i128 @ashr_i128(i128 signext %a, i128 signext %b) {
142 ; ALL-LABEL: ashr_i128:
144 ; GP32: lw $25, %call16(__ashrti3)($gp)
146 ; M3: sll $[[T0:[0-9]+]], $7, 0
147 ; M3: dsrav $[[T1:[0-9]+]], $4, $7
148 ; M3: andi $[[T2:[0-9]+]], $[[T0]], 32
149 ; M3: bnez $[[T3:[0-9]+]], $[[BB0:BB[0-9_]+]]
150 ; M3: move $3, $[[T1]]
151 ; M3: dsrlv $[[T4:[0-9]+]], $5, $7
152 ; M3: dsll $[[T5:[0-9]+]], $4, 1
153 ; M3: not $[[T6:[0-9]+]], $[[T0]]
154 ; M3: dsllv $[[T7:[0-9]+]], $[[T5]], $[[T6]]
155 ; M3: or $3, $[[T7]], $[[T4]]
157 ; M3: beqz $[[T3]], $[[BB1:BB[0-9_]+]]
159 ; M3: dsra $2, $4, 31
164 ; GP64-NOT-R6: dsrlv $[[T0:[0-9]+]], $5, $7
165 ; GP64-NOT-R6: dsll $[[T1:[0-9]+]], $4, 1
166 ; GP64-NOT-R6: sll $[[T2:[0-9]+]], $7, 0
167 ; GP64-NOT-R6: not $[[T3:[0-9]+]], $[[T2]]
168 ; GP64-NOT-R6: dsllv $[[T4:[0-9]+]], $[[T1]], $[[T3]]
169 ; GP64-NOT-R6: or $3, $[[T4]], $[[T0]]
170 ; GP64-NOT-R6: dsrav $2, $4, $7
171 ; GP64-NOT-R6: andi $[[T5:[0-9]+]], $[[T2]], 32
172 ; GP64-NOT-R6: movn $3, $2, $[[T5]]
173 ; GP64-NOT-R6: dsra $[[T6:[0-9]+]], $4, 31
174 ; GP64-NOT-R6: jr $ra
175 ; GP64-NOT-R6: movn $2, $[[T6]], $[[T5]]
177 ; 64R6: dsrav $[[T0:[0-9]+]], $4, $7
178 ; 64R6: sll $[[T1:[0-9]+]], $7, 0
179 ; 64R6: andi $[[T2:[0-9]+]], $[[T1]], 32
180 ; 64R6: sll $[[T3:[0-9]+]], $[[T2]], 0
181 ; 64R6: seleqz $[[T4:[0-9]+]], $[[T0]], $[[T3]]
182 ; 64R6: dsra $[[T5:[0-9]+]], $4, 31
183 ; 64R6: selnez $[[T6:[0-9]+]], $[[T5]], $[[T3]]
184 ; 64R6: or $2, $[[T6]], $[[T4]]
185 ; 64R6: dsrlv $[[T7:[0-9]+]], $5, $7
186 ; 64R6: dsll $[[T8:[0-9]+]], $4, 1
187 ; 64R6: not $[[T9:[0-9]+]], $[[T1]]
188 ; 64R6: dsllv $[[T10:[0-9]+]], $[[T8]], $[[T9]]
189 ; 64R6: or $[[T11:[0-9]+]], $[[T10]], $[[T7]]
190 ; 64R6: seleqz $[[T12:[0-9]+]], $[[T11]], $[[T3]]
191 ; 64R6: selnez $[[T13:[0-9]+]], $[[T0]], $[[T3]]
193 ; 64R6: or $3, $[[T13]], $[[T12]]
195 %r = ashr i128 %a, %b