//===- ARMRegisterInfo.td - ARM Register defs --------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Declarations that describe the ARM register file
//===----------------------------------------------------------------------===//

// Registers are identified with 4-bit ID numbers.
class ARMReg<bits<4> num, string n, list<Register> subregs = []> : Register<n> {
  field bits<4> Num;
  let Namespace = "ARM";
  let SubRegs = subregs;
}

class ARMFReg<bits<6> num, string n> : Register<n> {
  field bits<6> Num;
  let Namespace = "ARM";
}

// Subregister indices.
let Namespace = "ARM" in {
// Note: Code depends on these having consecutive numbers.
def ssub_0  : SubRegIndex;
def ssub_1  : SubRegIndex;
def ssub_2  : SubRegIndex; // In a Q reg.
def ssub_3  : SubRegIndex;
def ssub_4  : SubRegIndex; // In a QQ reg.
def ssub_5  : SubRegIndex;
def ssub_6  : SubRegIndex;
def ssub_7  : SubRegIndex;
def ssub_8  : SubRegIndex; // In a QQQQ reg.
def ssub_9  : SubRegIndex;
def ssub_10 : SubRegIndex;
def ssub_11 : SubRegIndex;
def ssub_12 : SubRegIndex;
def ssub_13 : SubRegIndex;
def ssub_14 : SubRegIndex;
def ssub_15 : SubRegIndex;

def dsub_0 : SubRegIndex;
def dsub_1 : SubRegIndex;
def dsub_2 : SubRegIndex;
def dsub_3 : SubRegIndex;
def dsub_4 : SubRegIndex;
def dsub_5 : SubRegIndex;
def dsub_6 : SubRegIndex;
def dsub_7 : SubRegIndex;

def qsub_0 : SubRegIndex;
def qsub_1 : SubRegIndex;
def qsub_2 : SubRegIndex;
def qsub_3 : SubRegIndex;

def qqsub_0 : SubRegIndex;
def qqsub_1 : SubRegIndex;
}
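
// Orientation: ssub_N selects the Nth 32-bit lane of a larger register and
// dsub_N the Nth 64-bit lane, given the aliasing defined below (D0 = S0:S1,
// Q0 = D0:D1, ...). For example, ssub_2 of Q1 (which spans S4-S7) is S6.
// The consecutive numbering presumably lets client code reach lane N by
// computing ARM::ssub_0 + N.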

// Integer registers
def R0  : ARMReg< 0, "r0">,  DwarfRegNum<[0]>;
def R1  : ARMReg< 1, "r1">,  DwarfRegNum<[1]>;
def R2  : ARMReg< 2, "r2">,  DwarfRegNum<[2]>;
def R3  : ARMReg< 3, "r3">,  DwarfRegNum<[3]>;
def R4  : ARMReg< 4, "r4">,  DwarfRegNum<[4]>;
def R5  : ARMReg< 5, "r5">,  DwarfRegNum<[5]>;
def R6  : ARMReg< 6, "r6">,  DwarfRegNum<[6]>;
def R7  : ARMReg< 7, "r7">,  DwarfRegNum<[7]>;
// These require 32-bit instructions.
let CostPerUse = 1 in {
def R8  : ARMReg< 8, "r8">,  DwarfRegNum<[8]>;
def R9  : ARMReg< 9, "r9">,  DwarfRegNum<[9]>;
def R10 : ARMReg<10, "r10">, DwarfRegNum<[10]>;
def R11 : ARMReg<11, "r11">, DwarfRegNum<[11]>;
def R12 : ARMReg<12, "r12">, DwarfRegNum<[12]>;
def SP  : ARMReg<13, "sp">,  DwarfRegNum<[13]>;
def LR  : ARMReg<14, "lr">,  DwarfRegNum<[14]>;
def PC  : ARMReg<15, "pc">,  DwarfRegNum<[15]>;
}

// Float registers
def S0  : ARMFReg< 0, "s0">;  def S1  : ARMFReg< 1, "s1">;
def S2  : ARMFReg< 2, "s2">;  def S3  : ARMFReg< 3, "s3">;
def S4  : ARMFReg< 4, "s4">;  def S5  : ARMFReg< 5, "s5">;
def S6  : ARMFReg< 6, "s6">;  def S7  : ARMFReg< 7, "s7">;
def S8  : ARMFReg< 8, "s8">;  def S9  : ARMFReg< 9, "s9">;
def S10 : ARMFReg<10, "s10">; def S11 : ARMFReg<11, "s11">;
def S12 : ARMFReg<12, "s12">; def S13 : ARMFReg<13, "s13">;
def S14 : ARMFReg<14, "s14">; def S15 : ARMFReg<15, "s15">;
def S16 : ARMFReg<16, "s16">; def S17 : ARMFReg<17, "s17">;
def S18 : ARMFReg<18, "s18">; def S19 : ARMFReg<19, "s19">;
def S20 : ARMFReg<20, "s20">; def S21 : ARMFReg<21, "s21">;
def S22 : ARMFReg<22, "s22">; def S23 : ARMFReg<23, "s23">;
def S24 : ARMFReg<24, "s24">; def S25 : ARMFReg<25, "s25">;
def S26 : ARMFReg<26, "s26">; def S27 : ARMFReg<27, "s27">;
def S28 : ARMFReg<28, "s28">; def S29 : ARMFReg<29, "s29">;
def S30 : ARMFReg<30, "s30">; def S31 : ARMFReg<31, "s31">;

// Aliases of the S* registers used to hold 64-bit fp values (doubles).
let SubRegIndices = [ssub_0, ssub_1] in {
def D0  : ARMReg< 0,  "d0", [S0,  S1]>,  DwarfRegNum<[256]>;
def D1  : ARMReg< 1,  "d1", [S2,  S3]>,  DwarfRegNum<[257]>;
def D2  : ARMReg< 2,  "d2", [S4,  S5]>,  DwarfRegNum<[258]>;
def D3  : ARMReg< 3,  "d3", [S6,  S7]>,  DwarfRegNum<[259]>;
def D4  : ARMReg< 4,  "d4", [S8,  S9]>,  DwarfRegNum<[260]>;
def D5  : ARMReg< 5,  "d5", [S10, S11]>, DwarfRegNum<[261]>;
def D6  : ARMReg< 6,  "d6", [S12, S13]>, DwarfRegNum<[262]>;
def D7  : ARMReg< 7,  "d7", [S14, S15]>, DwarfRegNum<[263]>;
def D8  : ARMReg< 8,  "d8", [S16, S17]>, DwarfRegNum<[264]>;
def D9  : ARMReg< 9,  "d9", [S18, S19]>, DwarfRegNum<[265]>;
def D10 : ARMReg<10, "d10", [S20, S21]>, DwarfRegNum<[266]>;
def D11 : ARMReg<11, "d11", [S22, S23]>, DwarfRegNum<[267]>;
def D12 : ARMReg<12, "d12", [S24, S25]>, DwarfRegNum<[268]>;
def D13 : ARMReg<13, "d13", [S26, S27]>, DwarfRegNum<[269]>;
def D14 : ARMReg<14, "d14", [S28, S29]>, DwarfRegNum<[270]>;
def D15 : ARMReg<15, "d15", [S30, S31]>, DwarfRegNum<[271]>;
}
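
// Worked example of the aliasing: D5 is the pair [S10, S11], so a write to
// s11 clobbers the high half of d5, and ssub_1 applied to D5 yields S11.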

// VFP3 defines 16 additional double registers
def D16 : ARMFReg<16, "d16">, DwarfRegNum<[272]>;
def D17 : ARMFReg<17, "d17">, DwarfRegNum<[273]>;
def D18 : ARMFReg<18, "d18">, DwarfRegNum<[274]>;
def D19 : ARMFReg<19, "d19">, DwarfRegNum<[275]>;
def D20 : ARMFReg<20, "d20">, DwarfRegNum<[276]>;
def D21 : ARMFReg<21, "d21">, DwarfRegNum<[277]>;
def D22 : ARMFReg<22, "d22">, DwarfRegNum<[278]>;
def D23 : ARMFReg<23, "d23">, DwarfRegNum<[279]>;
def D24 : ARMFReg<24, "d24">, DwarfRegNum<[280]>;
def D25 : ARMFReg<25, "d25">, DwarfRegNum<[281]>;
def D26 : ARMFReg<26, "d26">, DwarfRegNum<[282]>;
def D27 : ARMFReg<27, "d27">, DwarfRegNum<[283]>;
def D28 : ARMFReg<28, "d28">, DwarfRegNum<[284]>;
def D29 : ARMFReg<29, "d29">, DwarfRegNum<[285]>;
def D30 : ARMFReg<30, "d30">, DwarfRegNum<[286]>;
def D31 : ARMFReg<31, "d31">, DwarfRegNum<[287]>;

// Advanced SIMD (NEON) defines 16 quad-word aliases
let SubRegIndices = [dsub_0, dsub_1],
    CompositeIndices = [(ssub_2 dsub_1, ssub_0),
                        (ssub_3 dsub_1, ssub_1)] in {
def Q0 : ARMReg< 0, "q0", [D0, D1]>;
def Q1 : ARMReg< 1, "q1", [D2, D3]>;
def Q2 : ARMReg< 2, "q2", [D4, D5]>;
def Q3 : ARMReg< 3, "q3", [D6, D7]>;
def Q4 : ARMReg< 4, "q4", [D8, D9]>;
def Q5 : ARMReg< 5, "q5", [D10, D11]>;
def Q6 : ARMReg< 6, "q6", [D12, D13]>;
def Q7 : ARMReg< 7, "q7", [D14, D15]>;
}
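// Reading the composite indices above, as a sketch: (ssub_2 dsub_1, ssub_0)
// defines ssub_2 of a Q register as "apply dsub_1, then ssub_0 of the
// result", so ssub_2 of Q0 is D1's ssub_0, i.e. S2. The block below omits
// the ssub composites because Q8-Q15 are built from D16-D31, which have no
// S subregisters.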
let SubRegIndices = [dsub_0, dsub_1] in {
def Q8  : ARMReg< 8,  "q8", [D16, D17]>;
def Q9  : ARMReg< 9,  "q9", [D18, D19]>;
def Q10 : ARMReg<10, "q10", [D20, D21]>;
def Q11 : ARMReg<11, "q11", [D22, D23]>;
def Q12 : ARMReg<12, "q12", [D24, D25]>;
def Q13 : ARMReg<13, "q13", [D26, D27]>;
def Q14 : ARMReg<14, "q14", [D28, D29]>;
def Q15 : ARMReg<15, "q15", [D30, D31]>;
}

// Pseudo 256-bit registers to represent pairs of Q registers. These should
// never be present in the emitted code.
// These are used for NEON load / store instructions, e.g., vld4, vst3.
// NOTE: It's possible to define more QQ registers since technically the
// starting D register number doesn't have to be a multiple of 4, e.g.,
// D1, D2, D3, D4 would be a legal quad, but that would make the subregister
// indices overlap across registers and complicate the bookkeeping.
let SubRegIndices = [qsub_0, qsub_1] in {
let CompositeIndices = [(dsub_2 qsub_1, dsub_0), (dsub_3 qsub_1, dsub_1),
                        (ssub_4 qsub_1, ssub_0), (ssub_5 qsub_1, ssub_1),
                        (ssub_6 qsub_1, ssub_2), (ssub_7 qsub_1, ssub_3)] in {
def QQ0 : ARMReg<0, "qq0", [Q0, Q1]>;
def QQ1 : ARMReg<1, "qq1", [Q2, Q3]>;
def QQ2 : ARMReg<2, "qq2", [Q4, Q5]>;
def QQ3 : ARMReg<3, "qq3", [Q6, Q7]>;
}
let CompositeIndices = [(dsub_2 qsub_1, dsub_0), (dsub_3 qsub_1, dsub_1)] in {
def QQ4 : ARMReg<4, "qq4", [Q8, Q9]>;
def QQ5 : ARMReg<5, "qq5", [Q10, Q11]>;
def QQ6 : ARMReg<6, "qq6", [Q12, Q13]>;
def QQ7 : ARMReg<7, "qq7", [Q14, Q15]>;
}
}
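
// The same composition scheme, one level up: dsub_2 of QQ0 resolves to
// qsub_1 (Q1), then dsub_0 of that, i.e. D2. QQ4-QQ7 omit the ssub
// composites for the same reason as Q8-Q15.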

// Pseudo 512-bit registers to represent four consecutive Q registers.
let SubRegIndices = [qqsub_0, qqsub_1] in {
let CompositeIndices = [(qsub_2  qqsub_1, qsub_0), (qsub_3  qqsub_1, qsub_1),
                        (dsub_4  qqsub_1, dsub_0), (dsub_5  qqsub_1, dsub_1),
                        (dsub_6  qqsub_1, dsub_2), (dsub_7  qqsub_1, dsub_3),
                        (ssub_8  qqsub_1, ssub_0), (ssub_9  qqsub_1, ssub_1),
                        (ssub_10 qqsub_1, ssub_2), (ssub_11 qqsub_1, ssub_3),
                        (ssub_12 qqsub_1, ssub_4), (ssub_13 qqsub_1, ssub_5),
                        (ssub_14 qqsub_1, ssub_6), (ssub_15 qqsub_1, ssub_7)] in {
def QQQQ0 : ARMReg<0, "qqqq0", [QQ0, QQ1]>;
def QQQQ1 : ARMReg<1, "qqqq1", [QQ2, QQ3]>;
}
let CompositeIndices = [(qsub_2 qqsub_1, qsub_0), (qsub_3 qqsub_1, qsub_1),
                        (dsub_4 qqsub_1, dsub_0), (dsub_5 qqsub_1, dsub_1),
                        (dsub_6 qqsub_1, dsub_2), (dsub_7 qqsub_1, dsub_3)] in {
def QQQQ2 : ARMReg<2, "qqqq2", [QQ4, QQ5]>;
def QQQQ3 : ARMReg<3, "qqqq3", [QQ6, QQ7]>;
}
}
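
// And once more for the 512-bit pseudos: qsub_2 of QQQQ0 is QQ1's qsub_0,
// i.e. Q2, so every S, D and Q register inside QQQQ0 (S0-S15, D0-D7, Q0-Q3)
// stays reachable through a single subregister index.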

// Current Program Status Register.
def CPSR    : ARMReg<0, "cpsr">;
def FPSCR   : ARMReg<1, "fpscr">;
def ITSTATE : ARMReg<2, "itstate">;

// Special Registers - only available in privileged mode.
def FPSID : ARMReg<0, "fpsid">;
def FPEXC : ARMReg<8, "fpexc">;

// Register classes.
//
// pc  == Program Counter
// lr  == Link Register
// sp  == Stack Pointer
// r12 == ip (scratch)
// r7  == Frame Pointer (thumb-style backtraces)
// r9  == May be reserved as Thread Register
// r11 == Frame Pointer (arm-style backtraces)
// r10 == Stack Limit

def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
                                           R7, R8, R9, R10, R11, R12,
                                           SP, LR, PC]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    static const unsigned ARM_GPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12, ARM::LR,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7,
      ARM::R8, ARM::R9, ARM::R10, ARM::R11 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_GPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7 };

    GPRClass::iterator
    GPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO;
      return ARM_GPR_AO;
    }

    GPRClass::iterator
    GPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO + (sizeof(THUMB_GPR_AO)/sizeof(unsigned));
      return ARM_GPR_AO + (sizeof(ARM_GPR_AO)/sizeof(unsigned));
    }
  }];
}
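
// How the hooks above get used (a sketch): TableGen splices MethodProtos and
// MethodBodies into the generated GPRClass in ARMGenRegisterInfo.inc, and the
// register allocator then walks the half-open range
// [allocation_order_begin(MF), allocation_order_end(MF)), trying registers in
// the order the arrays spell out; the same pattern repeats for the classes
// below.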

// Restricted GPR register class. Many Thumb2 instructions allow the full
// register range for operands, but have undefined behaviour when SP or PC
// (R13 or R15) is used. The ARM ISA refers to these operands via the
// BadReg() pseudo-code description.
def rGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
                                            R7, R8, R9, R10, R11, R12, LR]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    static const unsigned ARM_rGPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12, ARM::LR,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7,
      ARM::R8, ARM::R9, ARM::R10,
      ARM::R11 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_rGPR_AO[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R4, ARM::R5, ARM::R6, ARM::R7 };

    rGPRClass::iterator
    rGPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_rGPR_AO;
      return ARM_rGPR_AO;
    }

    rGPRClass::iterator
    rGPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_rGPR_AO + (sizeof(THUMB_rGPR_AO)/sizeof(unsigned));
      return ARM_rGPR_AO + (sizeof(ARM_rGPR_AO)/sizeof(unsigned));
    }
  }];
}

// Thumb registers are R0-R7 normally. Some instructions can still use
// the general GPR register class above (e.g., MOV).
def tGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6, R7]> {}

// For tail calls, we can't use callee-saved registers, as they are restored
// to the saved value before the tail call, which would clobber a call address.
// Note, getMinimalPhysRegClass(R0) returns tGPR because of the names of
// this class and the preceding one(!) This is what we want.
def tcGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R9, R12]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // R9 is available.
    static const unsigned ARM_GPR_R9_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R9, ARM::R12 };
    // R9 is not available.
    static const unsigned ARM_GPR_NOR9_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3,
      ARM::R12 };

    // For Thumb1 mode, we don't want to allocate hi regs at all, as we
    // don't know how to spill them. If we make our prologue/epilogue code
    // smarter at some point, we can go back to using the above allocation
    // orders for the Thumb1 instructions that know how to use hi regs.
    static const unsigned THUMB_GPR_AO_TC[] = {
      ARM::R0, ARM::R1, ARM::R2, ARM::R3 };

    tcGPRClass::iterator
    tcGPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO_TC;
      return Subtarget.isTargetDarwin() ? ARM_GPR_R9_TC : ARM_GPR_NOR9_TC;
    }

    tcGPRClass::iterator
    tcGPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.isThumb1Only())
        return THUMB_GPR_AO_TC + (sizeof(THUMB_GPR_AO_TC)/sizeof(unsigned));

      return Subtarget.isTargetDarwin() ?
        ARM_GPR_R9_TC + (sizeof(ARM_GPR_R9_TC)/sizeof(unsigned)) :
        ARM_GPR_NOR9_TC + (sizeof(ARM_GPR_NOR9_TC)/sizeof(unsigned));
    }
  }];
}
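
// Note on the target split above: only Darwin hands out R9 for tail calls;
// elsewhere it stays out of the order since, per the role table near the top
// of this file, r9 may be reserved as a thread register.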

// Scalar single precision floating point register class.
def SPR : RegisterClass<"ARM", [f32], 32, [S0, S1, S2, S3, S4, S5, S6, S7, S8,
                                           S9, S10, S11, S12, S13, S14, S15,
                                           S16, S17, S18, S19, S20, S21, S22,
                                           S23, S24, S25, S26, S27, S28, S29,
                                           S30, S31]>;

// Subset of SPR which can be used as a source of NEON scalars for 16-bit
// operations.
def SPR_8 : RegisterClass<"ARM", [f32], 32,
                          [S0, S1, S2, S3, S4, S5, S6, S7,
                           S8, S9, S10, S11, S12, S13, S14, S15]>;

// Scalar double precision floating point / generic 64-bit vector register
// class.
// ARM requires only word alignment for double, though performance is better
// when doubles are double-word aligned.
def DPR : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                        [D0, D1, D2, D3, D4, D5, D6, D7,
                         D8, D9, D10, D11, D12, D13, D14, D15,
                         D16, D17, D18, D19, D20, D21, D22, D23,
                         D24, D25, D26, D27, D28, D29, D30, D31]> {
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // VFP2 / VFPv3-D16
    static const unsigned ARM_DPR_VFP2[] = {
      ARM::D0, ARM::D1, ARM::D2, ARM::D3,
      ARM::D4, ARM::D5, ARM::D6, ARM::D7,
      ARM::D8, ARM::D9, ARM::D10, ARM::D11,
      ARM::D12, ARM::D13, ARM::D14, ARM::D15 };
    // VFP3: D8-D15 are callee saved and should be allocated last.
    // Save other low registers for use as DPR_VFP2 and DPR_8 classes.
    static const unsigned ARM_DPR_VFP3[] = {
      ARM::D16, ARM::D17, ARM::D18, ARM::D19,
      ARM::D20, ARM::D21, ARM::D22, ARM::D23,
      ARM::D24, ARM::D25, ARM::D26, ARM::D27,
      ARM::D28, ARM::D29, ARM::D30, ARM::D31,
      ARM::D0, ARM::D1, ARM::D2, ARM::D3,
      ARM::D4, ARM::D5, ARM::D6, ARM::D7,
      ARM::D8, ARM::D9, ARM::D10, ARM::D11,
      ARM::D12, ARM::D13, ARM::D14, ARM::D15 };

    DPRClass::iterator
    DPRClass::allocation_order_begin(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.hasVFP3() && !Subtarget.hasD16())
        return ARM_DPR_VFP3;
      return ARM_DPR_VFP2;
    }

    DPRClass::iterator
    DPRClass::allocation_order_end(const MachineFunction &MF) const {
      const TargetMachine &TM = MF.getTarget();
      const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
      if (Subtarget.hasVFP3() && !Subtarget.hasD16())
        return ARM_DPR_VFP3 + (sizeof(ARM_DPR_VFP3)/sizeof(unsigned));

      return ARM_DPR_VFP2 + (sizeof(ARM_DPR_VFP2)/sizeof(unsigned));
    }
  }];
}

// Subset of DPR that are accessible with VFP2 (and so that also have
// 32-bit SPR subregs).
def DPR_VFP2 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                             [D0, D1, D2, D3, D4, D5, D6, D7,
                              D8, D9, D10, D11, D12, D13, D14, D15]> {
  let SubRegClasses = [(SPR ssub_0, ssub_1)];
}
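
// Reading SubRegClasses: (SPR ssub_0, ssub_1) records that ssub_0/ssub_1 of
// any DPR_VFP2 register land in SPR, which holds because D0-D15 are exactly
// the S-pair aliases defined earlier; D16-D31 would violate it, hence this
// subset class.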

// Subset of DPR which can be used as a source of NEON scalars for 16-bit
// operations.
def DPR_8 : RegisterClass<"ARM", [f64, v8i8, v4i16, v2i32, v1i64, v2f32], 64,
                          [D0, D1, D2, D3, D4, D5, D6, D7]> {
  let SubRegClasses = [(SPR_8 ssub_0, ssub_1)];
}

// Generic 128-bit vector register class.
def QPR : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], 128,
                        [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7,
                         Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // Q4-Q7 are callee saved and should be allocated last.
    // Save other low registers for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QPR[] = {
      ARM::Q8, ARM::Q9, ARM::Q10, ARM::Q11,
      ARM::Q12, ARM::Q13, ARM::Q14, ARM::Q15,
      ARM::Q0, ARM::Q1, ARM::Q2, ARM::Q3,
      ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7 };

    QPRClass::iterator
    QPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QPR;
    }

    QPRClass::iterator
    QPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QPR + (sizeof(ARM_QPR)/sizeof(unsigned));
    }
  }];
}

// Subset of QPR that have 32-bit SPR subregs.
def QPR_VFP2 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                             128,
                             [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]> {
  let SubRegClasses = [(SPR      ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_VFP2 dsub_0, dsub_1)];
}

// Subset of QPR that have DPR_8 and SPR_8 subregs.
def QPR_8 : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                          128,
                          [Q0, Q1, Q2, Q3]> {
  let SubRegClasses = [(SPR_8 ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_8 dsub_0, dsub_1)];
}

// Pseudo 256-bit vector register class to model pairs of Q registers
// (4 consecutive D registers).
def QQPR : RegisterClass<"ARM", [v4i64],
                         256,
                         [QQ0, QQ1, QQ2, QQ3, QQ4, QQ5, QQ6, QQ7]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3),
                       (QPR qsub_0, qsub_1)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // QQ2-QQ3 are callee saved and should be allocated last.
    // Save other low registers for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QQPR[] = {
      ARM::QQ4, ARM::QQ5, ARM::QQ6, ARM::QQ7,
      ARM::QQ0, ARM::QQ1, ARM::QQ2, ARM::QQ3 };

    QQPRClass::iterator
    QQPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QQPR;
    }

    QQPRClass::iterator
    QQPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QQPR + (sizeof(ARM_QQPR)/sizeof(unsigned));
    }
  }];
}

// Subset of QQPR that have 32-bit SPR subregs.
def QQPR_VFP2 : RegisterClass<"ARM", [v4i64],
                              256,
                              [QQ0, QQ1, QQ2, QQ3]> {
  let SubRegClasses = [(SPR      ssub_0, ssub_1, ssub_2, ssub_3),
                       (DPR_VFP2 dsub_0, dsub_1, dsub_2, dsub_3),
                       (QPR_VFP2 qsub_0, qsub_1)];
}

// Pseudo 512-bit vector register class to model 4 consecutive Q registers
// (8 consecutive D registers).
def QQQQPR : RegisterClass<"ARM", [v8i64],
                           512,
                           [QQQQ0, QQQQ1, QQQQ2, QQQQ3]> {
  let SubRegClasses = [(DPR dsub_0, dsub_1, dsub_2, dsub_3,
                            dsub_4, dsub_5, dsub_6, dsub_7),
                       (QPR qsub_0, qsub_1, qsub_2, qsub_3)];
  let MethodProtos = [{
    iterator allocation_order_begin(const MachineFunction &MF) const;
    iterator allocation_order_end(const MachineFunction &MF) const;
  }];
  let MethodBodies = [{
    // QQQQ1 is callee saved and should be allocated last.
    // Save QQQQ0 for use as QPR_VFP2 and QPR_8 classes.
    static const unsigned ARM_QQQQPR[] = {
      ARM::QQQQ2, ARM::QQQQ3, ARM::QQQQ0, ARM::QQQQ1 };

    QQQQPRClass::iterator
    QQQQPRClass::allocation_order_begin(const MachineFunction &MF) const {
      return ARM_QQQQPR;
    }

    QQQQPRClass::iterator
    QQQQPRClass::allocation_order_end(const MachineFunction &MF) const {
      return ARM_QQQQPR + (sizeof(ARM_QQQQPR)/sizeof(unsigned));
    }
  }];
}

// Condition code registers.
def CCR : RegisterClass<"ARM", [i32], 32, [CPSR]>;