//===- HexagonIntrinsicsV4.td - V4 Instruction intrinsics --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This is populated based on the following specs:
// Hexagon V4 Architecture Extensions
// Application-Level Specification
// 80-V9418-12 Rev. A
// June 15, 2010

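// Note on the helper classes used below: the T_*_pat classes (T_RR_pat,
// T_PP_pat, T_RI_pat, etc.) are defined in HexagonIntrinsics.td, not in this
// file.  The letters name the operand kinds in order: R = 32-bit IntRegs,
// P = 64-bit DoubleRegs, I = immediate.  As a rough sketch (the actual
// definitions live in HexagonIntrinsics.td and may differ in detail),
// T_RR_pat has the form:
//
//   class T_RR_pat <InstHexagon MI, Intrinsic IntID>
//     : Pat <(IntID IntRegs:$Rs, IntRegs:$Rt),
//            (MI IntRegs:$Rs, IntRegs:$Rt)>;
//
// so each def below simply selects the named instruction for the
// corresponding Hexagon intrinsic.
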
// Vector reduce multiply word by signed half (32x16)
//Rdd=vrmpyweh(Rss,Rtt)[:<<1]
def : T_PP_pat <M4_vrmpyeh_s0, int_hexagon_M4_vrmpyeh_s0>;
def : T_PP_pat <M4_vrmpyeh_s1, int_hexagon_M4_vrmpyeh_s1>;

//Rdd=vrmpywoh(Rss,Rtt)[:<<1]
def : T_PP_pat <M4_vrmpyoh_s0, int_hexagon_M4_vrmpyoh_s0>;
def : T_PP_pat <M4_vrmpyoh_s1, int_hexagon_M4_vrmpyoh_s1>;

//Rxx+=vrmpyweh(Rss,Rtt)[:<<1]
def : T_PPP_pat <M4_vrmpyeh_acc_s0, int_hexagon_M4_vrmpyeh_acc_s0>;
def : T_PPP_pat <M4_vrmpyeh_acc_s1, int_hexagon_M4_vrmpyeh_acc_s1>;

//Rxx+=vrmpywoh(Rss,Rtt)[:<<1]
def : T_PPP_pat <M4_vrmpyoh_acc_s0, int_hexagon_M4_vrmpyoh_acc_s0>;
def : T_PPP_pat <M4_vrmpyoh_acc_s1, int_hexagon_M4_vrmpyoh_acc_s1>;

// Vector multiply halfwords, signed by unsigned
// Rdd=vmpyhsu(Rs,Rt)[:<<1]:sat
def : T_RR_pat <M2_vmpy2su_s0, int_hexagon_M2_vmpy2su_s0>;
def : T_RR_pat <M2_vmpy2su_s1, int_hexagon_M2_vmpy2su_s1>;

// Rxx+=vmpyhsu(Rs,Rt)[:<<1]:sat
def : T_PRR_pat <M2_vmac2su_s0, int_hexagon_M2_vmac2su_s0>;
def : T_PRR_pat <M2_vmac2su_s1, int_hexagon_M2_vmac2su_s1>;

// Vector polynomial multiply halfwords
// Rdd=vpmpyh(Rs,Rt)
def : T_RR_pat <M4_vpmpyh, int_hexagon_M4_vpmpyh>;
// Rxx^=vpmpyh(Rs,Rt)
def : T_PRR_pat <M4_vpmpyh_acc, int_hexagon_M4_vpmpyh_acc>;

// Polynomial multiply words
// Rdd=pmpyw(Rs,Rt)
def : T_RR_pat <M4_pmpyw, int_hexagon_M4_pmpyw>;
// Rxx^=pmpyw(Rs,Rt)
def : T_PRR_pat <M4_pmpyw_acc, int_hexagon_M4_pmpyw_acc>;

//Rxx^=asr(Rss,Rt)
def : T_PPR_pat <S2_asr_r_p_xor, int_hexagon_S2_asr_r_p_xor>;
//Rxx^=asl(Rss,Rt)
def : T_PPR_pat <S2_asl_r_p_xor, int_hexagon_S2_asl_r_p_xor>;
//Rxx^=lsr(Rss,Rt)
def : T_PPR_pat <S2_lsr_r_p_xor, int_hexagon_S2_lsr_r_p_xor>;
//Rxx^=lsl(Rss,Rt)
def : T_PPR_pat <S2_lsl_r_p_xor, int_hexagon_S2_lsl_r_p_xor>;

// Multiply and use upper result
def : MType_R32_pat <int_hexagon_M2_mpysu_up, M2_mpysu_up>;
def : MType_R32_pat <int_hexagon_M2_mpy_up_s1, M2_mpy_up_s1>;
def : MType_R32_pat <int_hexagon_M2_hmmpyh_s1, M2_hmmpyh_s1>;
def : MType_R32_pat <int_hexagon_M2_hmmpyl_s1, M2_hmmpyl_s1>;
def : MType_R32_pat <int_hexagon_M2_mpy_up_s1_sat, M2_mpy_up_s1_sat>;

// Vector reduce add halfwords
def : Pat <(int_hexagon_M2_vraddh DoubleRegs:$src1, DoubleRegs:$src2),
           (M2_vraddh DoubleRegs:$src1, DoubleRegs:$src2)>;

def : T_P_pat <S2_brevp, int_hexagon_S2_brevp>;

def: T_P_pat  <S2_ct0p,      int_hexagon_S2_ct0p>;
def: T_P_pat  <S2_ct1p,      int_hexagon_S2_ct1p>;
def: T_RR_pat<C4_nbitsset,  int_hexagon_C4_nbitsset>;
def: T_RR_pat<C4_nbitsclr,  int_hexagon_C4_nbitsclr>;
def: T_RI_pat<C4_nbitsclri, int_hexagon_C4_nbitsclri>;

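// Map the vector compare-with-immediate intrinsics onto their instructions.
// immPred constrains the immediate operand to the range the instruction can
// encode (u8, s8 or u7 in the defs below).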
class vcmpImm_pat <InstHexagon MI, Intrinsic IntID, PatLeaf immPred> :
      Pat <(IntID  (i64 DoubleRegs:$src1), immPred:$src2),
           (MI (i64 DoubleRegs:$src1), immPred:$src2)>;

def : vcmpImm_pat <A4_vcmpbeqi, int_hexagon_A4_vcmpbeqi, u8ImmPred>;
def : vcmpImm_pat <A4_vcmpbgti, int_hexagon_A4_vcmpbgti, s8ImmPred>;
def : vcmpImm_pat <A4_vcmpbgtui, int_hexagon_A4_vcmpbgtui, u7ImmPred>;

def : vcmpImm_pat <A4_vcmpheqi, int_hexagon_A4_vcmpheqi, s8ImmPred>;
def : vcmpImm_pat <A4_vcmphgti, int_hexagon_A4_vcmphgti, s8ImmPred>;
def : vcmpImm_pat <A4_vcmphgtui, int_hexagon_A4_vcmphgtui, u7ImmPred>;

def : vcmpImm_pat <A4_vcmpweqi, int_hexagon_A4_vcmpweqi, s8ImmPred>;
def : vcmpImm_pat <A4_vcmpwgti, int_hexagon_A4_vcmpwgti, s8ImmPred>;
def : vcmpImm_pat <A4_vcmpwgtui, int_hexagon_A4_vcmpwgtui, u7ImmPred>;

def : T_PP_pat<A4_vcmpbeq_any, int_hexagon_A4_vcmpbeq_any>;

def : T_RR_pat<A4_cmpbeq,   int_hexagon_A4_cmpbeq>;
def : T_RR_pat<A4_cmpbgt,   int_hexagon_A4_cmpbgt>;
def : T_RR_pat<A4_cmpbgtu,  int_hexagon_A4_cmpbgtu>;
def : T_RR_pat<A4_cmpheq,   int_hexagon_A4_cmpheq>;
def : T_RR_pat<A4_cmphgt,   int_hexagon_A4_cmphgt>;
def : T_RR_pat<A4_cmphgtu,  int_hexagon_A4_cmphgtu>;

def : T_RI_pat<A4_cmpbeqi,  int_hexagon_A4_cmpbeqi>;
def : T_RI_pat<A4_cmpbgti,  int_hexagon_A4_cmpbgti>;
def : T_RI_pat<A4_cmpbgtui, int_hexagon_A4_cmpbgtui>;

def : T_RI_pat<A4_cmpheqi,  int_hexagon_A4_cmpheqi>;
def : T_RI_pat<A4_cmphgti,  int_hexagon_A4_cmphgti>;
def : T_RI_pat<A4_cmphgtui, int_hexagon_A4_cmphgtui>;

def : T_RP_pat <A4_boundscheck, int_hexagon_A4_boundscheck>;

def : T_PR_pat<A4_tlbmatch, int_hexagon_A4_tlbmatch>;

def : Pat <(int_hexagon_M4_mpyrr_addr IntRegs:$src1, IntRegs:$src2,
                                      IntRegs:$src3),
           (M4_mpyrr_addr IntRegs:$src1, IntRegs:$src2, IntRegs:$src3)>;

def : T_IRR_pat <M4_mpyrr_addi, int_hexagon_M4_mpyrr_addi>;
def : T_IRI_pat <M4_mpyri_addi, int_hexagon_M4_mpyri_addi>;
def : T_RIR_pat <M4_mpyri_addr_u2, int_hexagon_M4_mpyri_addr_u2>;
def : T_RRI_pat <M4_mpyri_addr, int_hexagon_M4_mpyri_addr>;
// Multiply 32x32 and use upper result
def : T_RRR_pat <M4_mac_up_s1_sat, int_hexagon_M4_mac_up_s1_sat>;
def : T_RRR_pat <M4_nac_up_s1_sat, int_hexagon_M4_nac_up_s1_sat>;

// Complex multiply 32x16
def : T_PR_pat <M4_cmpyi_wh, int_hexagon_M4_cmpyi_wh>;
def : T_PR_pat <M4_cmpyr_wh, int_hexagon_M4_cmpyr_wh>;

def : T_PR_pat <M4_cmpyi_whc, int_hexagon_M4_cmpyi_whc>;
def : T_PR_pat <M4_cmpyr_whc, int_hexagon_M4_cmpyr_whc>;

def : T_PP_pat<A4_andnp, int_hexagon_A4_andnp>;
def : T_PP_pat<A4_ornp,  int_hexagon_A4_ornp>;

// Complex add/sub halfwords/words
def : T_PP_pat <S4_vxaddsubw, int_hexagon_S4_vxaddsubw>;
def : T_PP_pat <S4_vxsubaddw, int_hexagon_S4_vxsubaddw>;
def : T_PP_pat <S4_vxaddsubh, int_hexagon_S4_vxaddsubh>;
def : T_PP_pat <S4_vxsubaddh, int_hexagon_S4_vxsubaddh>;

def : T_PP_pat <S4_vxaddsubhr, int_hexagon_S4_vxaddsubhr>;
def : T_PP_pat <S4_vxsubaddhr, int_hexagon_S4_vxsubaddhr>;

// Extract bitfield
def : T_PP_pat  <S4_extractp_rp, int_hexagon_S4_extractp_rp>;
def : T_RP_pat  <S4_extract_rp, int_hexagon_S4_extract_rp>;
def : T_PII_pat <S4_extractp, int_hexagon_S4_extractp>;
def : T_RII_pat <S4_extract, int_hexagon_S4_extract>;

// Vector conditional negate
// Rdd=vcnegh(Rss,Rt)
def : T_PR_pat <S2_vcnegh, int_hexagon_S2_vcnegh>;

// Shift an immediate left by register amount
def : T_IR_pat<S4_lsli, int_hexagon_S4_lsli>;

// Vector reduce maximum halfwords
def : T_PPR_pat <A4_vrmaxh, int_hexagon_A4_vrmaxh>;
def : T_PPR_pat <A4_vrmaxuh, int_hexagon_A4_vrmaxuh>;

// Vector reduce maximum words
def : T_PPR_pat <A4_vrmaxw, int_hexagon_A4_vrmaxw>;
def : T_PPR_pat <A4_vrmaxuw, int_hexagon_A4_vrmaxuw>;

// Vector reduce minimum halfwords
def : T_PPR_pat <A4_vrminh, int_hexagon_A4_vrminh>;
def : T_PPR_pat <A4_vrminuh, int_hexagon_A4_vrminuh>;

// Vector reduce minimum words
def : T_PPR_pat <A4_vrminw, int_hexagon_A4_vrminw>;
def : T_PPR_pat <A4_vrminuw, int_hexagon_A4_vrminuw>;

// Rotate and reduce bytes
def : Pat <(int_hexagon_S4_vrcrotate DoubleRegs:$src1, IntRegs:$src2,
                                     u2ImmPred:$src3),
           (S4_vrcrotate DoubleRegs:$src1, IntRegs:$src2, u2ImmPred:$src3)>;

// Rotate and reduce bytes with accumulation
// Rxx+=vrcrotate(Rss,Rt,#u2)
def : Pat <(int_hexagon_S4_vrcrotate_acc DoubleRegs:$src1, DoubleRegs:$src2,
                                         IntRegs:$src3, u2ImmPred:$src4),
           (S4_vrcrotate_acc DoubleRegs:$src1, DoubleRegs:$src2,
                             IntRegs:$src3, u2ImmPred:$src4)>;

// Vector conditional negate
def : T_PPR_pat<S2_vrcnegh, int_hexagon_S2_vrcnegh>;

// Logical xor with xor accumulation
def : T_PPP_pat<M4_xor_xacc, int_hexagon_M4_xor_xacc>;

// ALU64 - Vector min/max byte
def : T_PP_pat <A2_vminb, int_hexagon_A2_vminb>;
def : T_PP_pat <A2_vmaxb, int_hexagon_A2_vmaxb>;

// Shift and add/sub/and/or
def : T_IRI_pat <S4_andi_asl_ri, int_hexagon_S4_andi_asl_ri>;
def : T_IRI_pat <S4_ori_asl_ri,  int_hexagon_S4_ori_asl_ri>;
def : T_IRI_pat <S4_addi_asl_ri, int_hexagon_S4_addi_asl_ri>;
def : T_IRI_pat <S4_subi_asl_ri, int_hexagon_S4_subi_asl_ri>;
def : T_IRI_pat <S4_andi_lsr_ri, int_hexagon_S4_andi_lsr_ri>;
def : T_IRI_pat <S4_ori_lsr_ri,  int_hexagon_S4_ori_lsr_ri>;
def : T_IRI_pat <S4_addi_lsr_ri, int_hexagon_S4_addi_lsr_ri>;
def : T_IRI_pat <S4_subi_lsr_ri, int_hexagon_S4_subi_lsr_ri>;

// Split bitfield
def : T_RI_pat <A4_bitspliti, int_hexagon_A4_bitspliti>;
def : T_RR_pat <A4_bitsplit, int_hexagon_A4_bitsplit>;

def: T_RR_pat<S4_parity,   int_hexagon_S4_parity>;

def: T_RI_pat<S4_ntstbit_i,  int_hexagon_S4_ntstbit_i>;
def: T_RR_pat<S4_ntstbit_r,  int_hexagon_S4_ntstbit_r>;

def: T_RI_pat<S4_clbaddi,  int_hexagon_S4_clbaddi>;
def: T_PI_pat<S4_clbpaddi, int_hexagon_S4_clbpaddi>;
def: T_P_pat <S4_clbpnorm, int_hexagon_S4_clbpnorm>;

/********************************************************************
*            ALU32/ALU                                              *
*********************************************************************/

// ALU32 / ALU / Logical Operations.
def: T_RR_pat<A4_andn, int_hexagon_A4_andn>;
def: T_RR_pat<A4_orn,  int_hexagon_A4_orn>;

/********************************************************************
*            ALU32/PERM                                             *
*********************************************************************/

// Combine Words Into Doublewords.
def: T_RI_pat<A4_combineri, int_hexagon_A4_combineri, s32ImmPred>;
def: T_IR_pat<A4_combineir, int_hexagon_A4_combineir, s32ImmPred>;

/********************************************************************
*            ALU32/PRED                                             *
*********************************************************************/

// Compare
def : T_RI_pat<C4_cmpneqi, int_hexagon_C4_cmpneqi, s32ImmPred>;
def : T_RI_pat<C4_cmpltei, int_hexagon_C4_cmpltei, s32ImmPred>;
def : T_RI_pat<C4_cmplteui, int_hexagon_C4_cmplteui, u32ImmPred>;

def: T_RR_pat<A4_rcmpeq,  int_hexagon_A4_rcmpeq>;
def: T_RR_pat<A4_rcmpneq, int_hexagon_A4_rcmpneq>;

def: T_RI_pat<A4_rcmpeqi,  int_hexagon_A4_rcmpeqi>;
def: T_RI_pat<A4_rcmpneqi, int_hexagon_A4_rcmpneqi>;

/********************************************************************
*            CR                                                     *
*********************************************************************/

// CR / Logical Operations On Predicates.

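// The C4_* compound predicate intrinsics take and return i32 values, while
// the underlying instructions operate on predicate registers.  The pattern
// below therefore moves each operand into a predicate register with C2_tfrrp,
// applies the compound operation, and copies the result back with C2_tfrpr.
// For example, int_hexagon_C4_and_and is selected (roughly) as:
//   C2_tfrpr (C4_and_and (C2_tfrrp $Rs), (C2_tfrrp $Rt), (C2_tfrrp $Ru))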
class qi_CRInst_qiqiqi_pat<Intrinsic IntID, InstHexagon Inst> :
  Pat<(i32 (IntID IntRegs:$Rs, IntRegs:$Rt, IntRegs:$Ru)),
      (i32 (C2_tfrpr (Inst (C2_tfrrp IntRegs:$Rs),
                           (C2_tfrrp IntRegs:$Rt),
                           (C2_tfrrp IntRegs:$Ru))))>;

def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_and,   C4_and_and>;
def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_andn,  C4_and_andn>;
def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_or,    C4_and_or>;
def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_and_orn,   C4_and_orn>;
def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_and,    C4_or_and>;
def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_andn,   C4_or_andn>;
def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_or,     C4_or_or>;
def: qi_CRInst_qiqiqi_pat<int_hexagon_C4_or_orn,    C4_or_orn>;

/********************************************************************
*            XTYPE/ALU                                              *
*********************************************************************/

// Add And Accumulate.

def : T_RRI_pat <S4_addaddi, int_hexagon_S4_addaddi>;
def : T_RIR_pat <S4_subaddi, int_hexagon_S4_subaddi>;


// XTYPE / ALU / Logical-logical Words.
def : T_RRR_pat <M4_or_xor,   int_hexagon_M4_or_xor>;
def : T_RRR_pat <M4_and_xor,  int_hexagon_M4_and_xor>;
def : T_RRR_pat <M4_or_and,   int_hexagon_M4_or_and>;
def : T_RRR_pat <M4_and_and,  int_hexagon_M4_and_and>;
def : T_RRR_pat <M4_xor_and,  int_hexagon_M4_xor_and>;
def : T_RRR_pat <M4_or_or,    int_hexagon_M4_or_or>;
def : T_RRR_pat <M4_and_or,   int_hexagon_M4_and_or>;
def : T_RRR_pat <M4_xor_or,   int_hexagon_M4_xor_or>;
def : T_RRR_pat <M4_or_andn,  int_hexagon_M4_or_andn>;
def : T_RRR_pat <M4_and_andn, int_hexagon_M4_and_andn>;
def : T_RRR_pat <M4_xor_andn, int_hexagon_M4_xor_andn>;

def : T_RRI_pat <S4_or_andi, int_hexagon_S4_or_andi>;
def : T_RRI_pat <S4_or_andix,  int_hexagon_S4_or_andix>;
def : T_RRI_pat <S4_or_ori, int_hexagon_S4_or_ori>;

// Modulo wrap.
def : T_RR_pat <A4_modwrapu, int_hexagon_A4_modwrapu>;

// Arithmetic/Convergent round
// Rd=[cround|round](Rs,Rt)[:sat]
// Rd=[cround|round](Rs,#u5)[:sat]
def : T_RI_pat <A4_cround_ri, int_hexagon_A4_cround_ri>;
def : T_RR_pat <A4_cround_rr, int_hexagon_A4_cround_rr>;

def : T_RI_pat <A4_round_ri, int_hexagon_A4_round_ri>;
def : T_RR_pat <A4_round_rr, int_hexagon_A4_round_rr>;

def : T_RI_pat <A4_round_ri_sat, int_hexagon_A4_round_ri_sat>;
def : T_RR_pat <A4_round_rr_sat, int_hexagon_A4_round_rr_sat>;

def : T_P_pat <A2_roundsat, int_hexagon_A2_roundsat>;