1 //===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the ARM target.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "arm-isel"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMTargetMachine.h"
18 #include "MCTargetDesc/ARMAddressingModes.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Constants.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/Function.h"
23 #include "llvm/Intrinsics.h"
24 #include "llvm/LLVMContext.h"
25 #include "llvm/CodeGen/MachineFrameInfo.h"
26 #include "llvm/CodeGen/MachineFunction.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/SelectionDAG.h"
29 #include "llvm/CodeGen/SelectionDAGISel.h"
30 #include "llvm/Target/TargetLowering.h"
31 #include "llvm/Target/TargetOptions.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/Compiler.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/raw_ostream.h"
41 DisableShifterOp("disable-shifter-op", cl::Hidden,
42 cl::desc("Disable isel of shifter-op"),
46 CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
47 cl::desc("Check fp vmla / vmls hazard at isel time"),
50 //===--------------------------------------------------------------------===//
51 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
52 /// instructions for SelectionDAG operations.
57 AM2_BASE, // Simple AM2 (+-imm12)
58 AM2_SHOP // Shifter-op AM2
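// Illustrative note (not from the original source): these two forms
// correspond to addresses such as
//   ldr r0, [r1, #-8]           ; AM2_BASE: base register +/- imm12
//   ldr r0, [r1, +r2, lsl #2]   ; AM2_SHOP: base register +/- shifted register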
61 class ARMDAGToDAGISel : public SelectionDAGISel {
62 ARMBaseTargetMachine &TM;
63 const ARMBaseInstrInfo *TII;
65 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
66 /// make the right decision when generating code for different targets.
67 const ARMSubtarget *Subtarget;
70 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm,
71 CodeGenOpt::Level OptLevel)
72 : SelectionDAGISel(tm, OptLevel), TM(tm),
73 TII(static_cast<const ARMBaseInstrInfo*>(TM.getInstrInfo())),
74 Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
77 virtual const char *getPassName() const {
78 return "ARM Instruction Selection";
/// getI32Imm - Return a target constant of type i32 with the specified
/// value.
83 inline SDValue getI32Imm(unsigned Imm) {
84 return CurDAG->getTargetConstant(Imm, MVT::i32);
87 SDNode *Select(SDNode *N);
90 bool hasNoVMLxHazardUse(SDNode *N) const;
91 bool isShifterOpProfitable(const SDValue &Shift,
92 ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
93 bool SelectRegShifterOperand(SDValue N, SDValue &A,
94 SDValue &B, SDValue &C,
95 bool CheckProfitability = true);
96 bool SelectImmShifterOperand(SDValue N, SDValue &A,
97 SDValue &B, bool CheckProfitability = true);
98 bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
99 SDValue &B, SDValue &C) {
100 // Don't apply the profitability check
101 return SelectRegShifterOperand(N, A, B, C, false);
103 bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
105 // Don't apply the profitability check
106 return SelectImmShifterOperand(N, A, B, false);
109 bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
110 bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);
112 AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
113 SDValue &Offset, SDValue &Opc);
114 bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
116 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
119 bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
121 return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
124 bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
126 SelectAddrMode2Worker(N, Base, Offset, Opc);
127 // return SelectAddrMode2ShOp(N, Base, Offset, Opc);
128 // This always matches one way or another.
132 bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
133 SDValue &Offset, SDValue &Opc);
134 bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
135 SDValue &Offset, SDValue &Opc);
136 bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
137 SDValue &Offset, SDValue &Opc);
138 bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
139 bool SelectAddrMode3(SDValue N, SDValue &Base,
140 SDValue &Offset, SDValue &Opc);
141 bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
142 SDValue &Offset, SDValue &Opc);
143 bool SelectAddrMode5(SDValue N, SDValue &Base,
145 bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,SDValue &Align);
146 bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);
148 bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);
150 // Thumb Addressing Modes:
151 bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
152 bool SelectThumbAddrModeRI(SDValue N, SDValue &Base, SDValue &Offset,
154 bool SelectThumbAddrModeRI5S1(SDValue N, SDValue &Base, SDValue &Offset);
155 bool SelectThumbAddrModeRI5S2(SDValue N, SDValue &Base, SDValue &Offset);
156 bool SelectThumbAddrModeRI5S4(SDValue N, SDValue &Base, SDValue &Offset);
157 bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
159 bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
161 bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
163 bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
165 bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
167 // Thumb 2 Addressing Modes:
168 bool SelectT2ShifterOperandReg(SDValue N,
169 SDValue &BaseReg, SDValue &Opc);
170 bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
171 bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
173 bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
175 bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
176 SDValue &OffReg, SDValue &ShImm);
178 inline bool is_so_imm(unsigned Imm) const {
179 return ARM_AM::getSOImmVal(Imm) != -1;
182 inline bool is_so_imm_not(unsigned Imm) const {
183 return ARM_AM::getSOImmVal(~Imm) != -1;
186 inline bool is_t2_so_imm(unsigned Imm) const {
187 return ARM_AM::getT2SOImmVal(Imm) != -1;
190 inline bool is_t2_so_imm_not(unsigned Imm) const {
191 return ARM_AM::getT2SOImmVal(~Imm) != -1;
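// Background sketch (not from the original source): an ARM shifter-operand
// immediate ("so_imm") is an 8-bit value rotated right by an even amount, so
// e.g. 0x00AB0000 is encodable (0xAB rotated right by 16) while 0x00000101 is
// not. The *_not variants test whether the bitwise complement is encodable
// instead, which lets MVN/BIC-style instruction forms be selected.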
194 // Include the pieces autogenerated from the target description.
195 #include "ARMGenDAGISel.inc"
/// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
/// ARM.
200 SDNode *SelectARMIndexedLoad(SDNode *N);
201 SDNode *SelectT2IndexedLoad(SDNode *N);
203 /// SelectVLD - Select NEON load intrinsics. NumVecs should be
204 /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
205 /// loads of D registers and even subregs and odd subregs of Q registers.
206 /// For NumVecs <= 2, QOpcodes1 is not used.
207 SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
208 const uint16_t *DOpcodes,
209 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
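// Illustrative sketch (an assumption about how Select() uses these helpers,
// not from the original source): the opcode arrays are indexed by element
// size, so a caller might pass a table such as
//   static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
//                                        ARM::VLD2d32, ARM::VLD1q64 };
// and SelectVLD picks the entry matching the vector type (v8i8 -> 0,
// v4i16 -> 1, v2i32 -> 2, v1i64 -> 3).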
211 /// SelectVST - Select NEON store intrinsics. NumVecs should
212 /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
213 /// stores of D registers and even subregs and odd subregs of Q registers.
214 /// For NumVecs <= 2, QOpcodes1 is not used.
215 SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
216 const uint16_t *DOpcodes,
217 const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);
219 /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
220 /// be 2, 3 or 4. The opcode arrays specify the instructions used for
221 /// load/store of D registers and Q registers.
222 SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
223 bool isUpdating, unsigned NumVecs,
224 const uint16_t *DOpcodes, const uint16_t *QOpcodes);
226 /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
227 /// should be 2, 3 or 4. The opcode array specifies the instructions used
228 /// for loading D registers. (Q registers are not supported.)
229 SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
230 const uint16_t *Opcodes);
232 /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
233 /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
234 /// generated to force the table registers to be consecutive.
235 SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);
237 /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
238 SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);
240 /// SelectCMOVOp - Select CMOV instructions for ARM.
241 SDNode *SelectCMOVOp(SDNode *N);
242 SDNode *SelectConditionalOp(SDNode *N);
243 SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
244 ARMCC::CondCodes CCVal, SDValue CCR,
246 SDNode *SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
247 ARMCC::CondCodes CCVal, SDValue CCR,
249 SDNode *SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
250 ARMCC::CondCodes CCVal, SDValue CCR,
252 SDNode *SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
253 ARMCC::CondCodes CCVal, SDValue CCR,
// Select special operations if the node forms an integer ABS pattern.
257 SDNode *SelectABSOp(SDNode *N);
259 SDNode *SelectConcatVector(SDNode *N);
261 SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
263 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
264 /// inline asm expressions.
265 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
267 std::vector<SDValue> &OutOps);
269 // Form pairs of consecutive S, D, or Q registers.
270 SDNode *PairSRegs(EVT VT, SDValue V0, SDValue V1);
271 SDNode *PairDRegs(EVT VT, SDValue V0, SDValue V1);
272 SDNode *PairQRegs(EVT VT, SDValue V0, SDValue V1);
274 // Form sequences of 4 consecutive S, D, or Q registers.
275 SDNode *QuadSRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
276 SDNode *QuadDRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
277 SDNode *QuadQRegs(EVT VT, SDValue V0, SDValue V1, SDValue V2, SDValue V3);
279 // Get the alignment operand for a NEON VLD or VST instruction.
280 SDValue GetVLDSTAlign(SDValue Align, unsigned NumVecs, bool is64BitVector);
284 /// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so, Imm will receive the 32-bit value.
286 static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
287 if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
288 Imm = cast<ConstantSDNode>(N)->getZExtValue();
// isInt32Immediate - This method tests to see if the operand is a 32-bit
// constant. If so, Imm will receive the 32-bit value.
296 static bool isInt32Immediate(SDValue N, unsigned &Imm) {
297 return isInt32Immediate(N.getNode(), Imm);
300 // isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the 32-bit value.
303 static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
304 return N->getOpcode() == Opc &&
305 isInt32Immediate(N->getOperand(1).getNode(), Imm);
/// \brief Check whether a particular node is a constant value representable
/// as (N * Scale) where N is in the range [\arg RangeMin, \arg RangeMax).
311 /// \param ScaledConstant [out] - On success, the pre-scaled constant value.
312 static bool isScaledConstantInRange(SDValue Node, int Scale,
313 int RangeMin, int RangeMax,
314 int &ScaledConstant) {
315 assert(Scale > 0 && "Invalid scale!");
317 // Check that this is a constant.
318 const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
322 ScaledConstant = (int) C->getZExtValue();
323 if ((ScaledConstant % Scale) != 0)
326 ScaledConstant /= Scale;
327 return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
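// Illustrative usage (a sketch mirroring the address-mode selectors below):
//   int RHSC;
//   if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC))
//     ...  // operand is a multiple of 4 in [0, 1024); RHSC holds value / 4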
330 /// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
331 /// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
333 bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
334 if (OptLevel == CodeGenOpt::None)
337 if (!CheckVMLxHazard)
340 if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9())
346 SDNode *Use = *N->use_begin();
347 if (Use->getOpcode() == ISD::CopyToReg)
349 if (Use->isMachineOpcode()) {
350 const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
353 unsigned Opcode = MCID.getOpcode();
354 if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
// vmlx feeding into another vmlx. We actually want to unfold
// the use later in the MLxExpansion pass. e.g.
//   vmla
//   vmla (stall 8 cycles)
// This adds up to about 18 - 19 cycles.
//
//   vmla
//   vmul (stall 4 cycles)
//   vadd
// This adds up to about 14 cycles.
369 return TII->isFpMLxInstruction(Opcode);
375 bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
376 ARM_AM::ShiftOpc ShOpcVal,
378 if (!Subtarget->isCortexA9())
380 if (Shift.hasOneUse())
383 return ShOpcVal == ARM_AM::lsl && ShAmt == 2;
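// Interpretation (not from the original source): on Cortex-A9 a folded
// shifter operand is only treated as free when the shift has a single use or
// is an LSL #2 (the common array-indexing form), e.g.
//   ldr r0, [r1, r2, lsl #2]
// so other shifted offsets are generally left as separate shift instructions.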
386 bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
389 bool CheckProfitability) {
390 if (DisableShifterOp)
393 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
395 // Don't match base register only case. That is matched to a separate
396 // lower complexity pattern with explicit register operand.
397 if (ShOpcVal == ARM_AM::no_shift) return false;
399 BaseReg = N.getOperand(0);
400 unsigned ShImmVal = 0;
401 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
402 if (!RHS) return false;
403 ShImmVal = RHS->getZExtValue() & 31;
404 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
409 bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
413 bool CheckProfitability) {
414 if (DisableShifterOp)
417 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
419 // Don't match base register only case. That is matched to a separate
420 // lower complexity pattern with explicit register operand.
421 if (ShOpcVal == ARM_AM::no_shift) return false;
423 BaseReg = N.getOperand(0);
424 unsigned ShImmVal = 0;
425 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
426 if (RHS) return false;
428 ShReg = N.getOperand(1);
429 if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
431 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
437 bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
440 // Match simple R + imm12 operands.
443 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
444 !CurDAG->isBaseWithConstantOffset(N)) {
445 if (N.getOpcode() == ISD::FrameIndex) {
446 // Match frame index.
447 int FI = cast<FrameIndexSDNode>(N)->getIndex();
448 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
449 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
453 if (N.getOpcode() == ARMISD::Wrapper &&
454 !(Subtarget->useMovt() &&
455 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
456 Base = N.getOperand(0);
459 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
463 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
464 int RHSC = (int)RHS->getZExtValue();
465 if (N.getOpcode() == ISD::SUB)
468 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
469 Base = N.getOperand(0);
470 if (Base.getOpcode() == ISD::FrameIndex) {
471 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
472 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
474 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
481 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
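// Illustrative note (a sketch, not from the original source): this selector
// produces the Base/OffImm operands for the LDRi12/STRi12 forms, i.e.
// addresses like
//   ldr r0, [r1, #offset]    ; base register plus 12-bit immediate offset
// anything else falls through to the register-offset (shifter-op) selectors.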
487 bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
489 if (N.getOpcode() == ISD::MUL &&
490 (!Subtarget->isCortexA9() || N.hasOneUse())) {
491 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
492 // X * [3,5,9] -> X + X * [2,4,8] etc.
493 int RHSC = (int)RHS->getZExtValue();
496 ARM_AM::AddrOpc AddSub = ARM_AM::add;
498 AddSub = ARM_AM::sub;
501 if (isPowerOf2_32(RHSC)) {
502 unsigned ShAmt = Log2_32(RHSC);
503 Base = Offset = N.getOperand(0);
504 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
513 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
514 // ISD::OR that is equivalent to an ISD::ADD.
515 !CurDAG->isBaseWithConstantOffset(N))
518 // Leave simple R +/- imm12 operands for LDRi12
519 if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
521 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
522 -0x1000+1, 0x1000, RHSC)) // 12 bits.
526 // Otherwise this is R +/- [possibly shifted] R.
527 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
528 ARM_AM::ShiftOpc ShOpcVal =
529 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
532 Base = N.getOperand(0);
533 Offset = N.getOperand(1);
535 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant; if not, we can't fold
// it.
538 if (ConstantSDNode *Sh =
539 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
540 ShAmt = Sh->getZExtValue();
541 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
542 Offset = N.getOperand(1).getOperand(0);
545 ShOpcVal = ARM_AM::no_shift;
548 ShOpcVal = ARM_AM::no_shift;
552 // Try matching (R shl C) + (R).
553 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
554 !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
555 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
556 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant; if not, we can't
// fold it.
559 if (ConstantSDNode *Sh =
560 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
561 ShAmt = Sh->getZExtValue();
562 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
563 Offset = N.getOperand(0).getOperand(0);
564 Base = N.getOperand(1);
567 ShOpcVal = ARM_AM::no_shift;
570 ShOpcVal = ARM_AM::no_shift;
575 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
583 AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
587 if (N.getOpcode() == ISD::MUL &&
588 (!Subtarget->isCortexA9() || N.hasOneUse())) {
589 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
590 // X * [3,5,9] -> X + X * [2,4,8] etc.
591 int RHSC = (int)RHS->getZExtValue();
594 ARM_AM::AddrOpc AddSub = ARM_AM::add;
596 AddSub = ARM_AM::sub;
599 if (isPowerOf2_32(RHSC)) {
600 unsigned ShAmt = Log2_32(RHSC);
601 Base = Offset = N.getOperand(0);
602 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
611 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
612 // ISD::OR that is equivalent to an ADD.
613 !CurDAG->isBaseWithConstantOffset(N)) {
615 if (N.getOpcode() == ISD::FrameIndex) {
616 int FI = cast<FrameIndexSDNode>(N)->getIndex();
617 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
618 } else if (N.getOpcode() == ARMISD::Wrapper &&
619 !(Subtarget->useMovt() &&
620 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
621 Base = N.getOperand(0);
623 Offset = CurDAG->getRegister(0, MVT::i32);
624 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
630 // Match simple R +/- imm12 operands.
631 if (N.getOpcode() != ISD::SUB) {
633 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
634 -0x1000+1, 0x1000, RHSC)) { // 12 bits.
635 Base = N.getOperand(0);
636 if (Base.getOpcode() == ISD::FrameIndex) {
637 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
638 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
640 Offset = CurDAG->getRegister(0, MVT::i32);
642 ARM_AM::AddrOpc AddSub = ARM_AM::add;
644 AddSub = ARM_AM::sub;
647 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
654 if (Subtarget->isCortexA9() && !N.hasOneUse()) {
655 // Compute R +/- (R << N) and reuse it.
657 Offset = CurDAG->getRegister(0, MVT::i32);
658 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
664 // Otherwise this is R +/- [possibly shifted] R.
665 ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
666 ARM_AM::ShiftOpc ShOpcVal =
667 ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
670 Base = N.getOperand(0);
671 Offset = N.getOperand(1);
673 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant; if not, we can't fold
// it.
676 if (ConstantSDNode *Sh =
677 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
678 ShAmt = Sh->getZExtValue();
679 if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
680 Offset = N.getOperand(1).getOperand(0);
683 ShOpcVal = ARM_AM::no_shift;
686 ShOpcVal = ARM_AM::no_shift;
690 // Try matching (R shl C) + (R).
691 if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
692 !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) {
693 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
694 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant; if not, we can't
// fold it.
697 if (ConstantSDNode *Sh =
698 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
699 ShAmt = Sh->getZExtValue();
700 if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
701 Offset = N.getOperand(0).getOperand(0);
702 Base = N.getOperand(1);
705 ShOpcVal = ARM_AM::no_shift;
708 ShOpcVal = ARM_AM::no_shift;
713 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
718 bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
719 SDValue &Offset, SDValue &Opc) {
720 unsigned Opcode = Op->getOpcode();
721 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
722 ? cast<LoadSDNode>(Op)->getAddressingMode()
723 : cast<StoreSDNode>(Op)->getAddressingMode();
724 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
725 ? ARM_AM::add : ARM_AM::sub;
727 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
731 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
733 if (ShOpcVal != ARM_AM::no_shift) {
// Check to see if the RHS of the shift is a constant; if not, we can't fold
// it.
736 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
737 ShAmt = Sh->getZExtValue();
738 if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
739 Offset = N.getOperand(0);
742 ShOpcVal = ARM_AM::no_shift;
745 ShOpcVal = ARM_AM::no_shift;
749 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
754 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
755 SDValue &Offset, SDValue &Opc) {
756 unsigned Opcode = Op->getOpcode();
757 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
758 ? cast<LoadSDNode>(Op)->getAddressingMode()
759 : cast<StoreSDNode>(Op)->getAddressingMode();
760 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
761 ? ARM_AM::add : ARM_AM::sub;
763 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
764 if (AddSub == ARM_AM::sub) Val *= -1;
765 Offset = CurDAG->getRegister(0, MVT::i32);
766 Opc = CurDAG->getTargetConstant(Val, MVT::i32);
774 bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
775 SDValue &Offset, SDValue &Opc) {
776 unsigned Opcode = Op->getOpcode();
777 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
778 ? cast<LoadSDNode>(Op)->getAddressingMode()
779 : cast<StoreSDNode>(Op)->getAddressingMode();
780 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
781 ? ARM_AM::add : ARM_AM::sub;
783 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
784 Offset = CurDAG->getRegister(0, MVT::i32);
785 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
794 bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
799 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
800 SDValue &Base, SDValue &Offset,
802 if (N.getOpcode() == ISD::SUB) {
// X - C is canonicalized to X + -C, no need to handle it here.
804 Base = N.getOperand(0);
805 Offset = N.getOperand(1);
806 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
810 if (!CurDAG->isBaseWithConstantOffset(N)) {
812 if (N.getOpcode() == ISD::FrameIndex) {
813 int FI = cast<FrameIndexSDNode>(N)->getIndex();
814 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
816 Offset = CurDAG->getRegister(0, MVT::i32);
817 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
821 // If the RHS is +/- imm8, fold into addr mode.
823 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
824 -256 + 1, 256, RHSC)) { // 8 bits.
825 Base = N.getOperand(0);
826 if (Base.getOpcode() == ISD::FrameIndex) {
827 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
828 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
830 Offset = CurDAG->getRegister(0, MVT::i32);
832 ARM_AM::AddrOpc AddSub = ARM_AM::add;
834 AddSub = ARM_AM::sub;
837 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
841 Base = N.getOperand(0);
842 Offset = N.getOperand(1);
843 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
847 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
848 SDValue &Offset, SDValue &Opc) {
849 unsigned Opcode = Op->getOpcode();
850 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
851 ? cast<LoadSDNode>(Op)->getAddressingMode()
852 : cast<StoreSDNode>(Op)->getAddressingMode();
853 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
854 ? ARM_AM::add : ARM_AM::sub;
if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
857 Offset = CurDAG->getRegister(0, MVT::i32);
858 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
863 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
867 bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
868 SDValue &Base, SDValue &Offset) {
869 if (!CurDAG->isBaseWithConstantOffset(N)) {
871 if (N.getOpcode() == ISD::FrameIndex) {
872 int FI = cast<FrameIndexSDNode>(N)->getIndex();
873 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
874 } else if (N.getOpcode() == ARMISD::Wrapper &&
875 !(Subtarget->useMovt() &&
876 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
877 Base = N.getOperand(0);
879 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
884 // If the RHS is +/- imm8, fold into addr mode.
886 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
887 -256 + 1, 256, RHSC)) {
888 Base = N.getOperand(0);
889 if (Base.getOpcode() == ISD::FrameIndex) {
890 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
891 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
894 ARM_AM::AddrOpc AddSub = ARM_AM::add;
896 AddSub = ARM_AM::sub;
899 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
905 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
910 bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
914 unsigned Alignment = 0;
915 if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(Parent)) {
916 // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
917 // The maximum alignment is equal to the memory size being referenced.
918 unsigned LSNAlign = LSN->getAlignment();
919 unsigned MemSize = LSN->getMemoryVT().getSizeInBits() / 8;
920 if (LSNAlign >= MemSize && MemSize > 1)
923 // All other uses of addrmode6 are for intrinsics. For now just record
924 // the raw alignment value; it will be refined later based on the legal
925 // alignment operands for the intrinsic.
926 Alignment = cast<MemIntrinsicSDNode>(Parent)->getAlignment();
929 Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
933 bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
935 LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
936 ISD::MemIndexedMode AM = LdSt->getAddressingMode();
937 if (AM != ISD::POST_INC)
940 if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
941 if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
942 Offset = CurDAG->getRegister(0, MVT::i32);
947 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
948 SDValue &Offset, SDValue &Label) {
949 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
950 Offset = N.getOperand(0);
951 SDValue N1 = N.getOperand(1);
952 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
961 //===----------------------------------------------------------------------===//
962 // Thumb Addressing Modes
963 //===----------------------------------------------------------------------===//
965 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
966 SDValue &Base, SDValue &Offset){
967 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
968 ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
969 if (!NC || !NC->isNullValue())
976 Base = N.getOperand(0);
977 Offset = N.getOperand(1);
982 ARMDAGToDAGISel::SelectThumbAddrModeRI(SDValue N, SDValue &Base,
983 SDValue &Offset, unsigned Scale) {
985 SDValue TmpBase, TmpOffImm;
986 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
987 return false; // We want to select tLDRspi / tSTRspi instead.
989 if (N.getOpcode() == ARMISD::Wrapper &&
990 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
991 return false; // We want to select tLDRpci instead.
994 if (!CurDAG->isBaseWithConstantOffset(N))
997 // Thumb does not have [sp, r] address mode.
998 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
999 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1000 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1001 (RHSR && RHSR->getReg() == ARM::SP))
1004 // FIXME: Why do we explicitly check for a match here and then return false?
// Presumably to allow something else to match, but shouldn't this be
// documented?
1008 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC))
1011 Base = N.getOperand(0);
1012 Offset = N.getOperand(1);
1017 ARMDAGToDAGISel::SelectThumbAddrModeRI5S1(SDValue N,
1020 return SelectThumbAddrModeRI(N, Base, Offset, 1);
1024 ARMDAGToDAGISel::SelectThumbAddrModeRI5S2(SDValue N,
1027 return SelectThumbAddrModeRI(N, Base, Offset, 2);
1031 ARMDAGToDAGISel::SelectThumbAddrModeRI5S4(SDValue N,
1034 return SelectThumbAddrModeRI(N, Base, Offset, 4);
1038 ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
1039 SDValue &Base, SDValue &OffImm) {
1041 SDValue TmpBase, TmpOffImm;
1042 if (SelectThumbAddrModeSP(N, TmpBase, TmpOffImm))
1043 return false; // We want to select tLDRspi / tSTRspi instead.
1045 if (N.getOpcode() == ARMISD::Wrapper &&
1046 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
1047 return false; // We want to select tLDRpci instead.
1050 if (!CurDAG->isBaseWithConstantOffset(N)) {
1051 if (N.getOpcode() == ARMISD::Wrapper &&
1052 !(Subtarget->useMovt() &&
1053 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
1054 Base = N.getOperand(0);
1059 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1063 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1064 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
1065 if ((LHSR && LHSR->getReg() == ARM::SP) ||
1066 (RHSR && RHSR->getReg() == ARM::SP)) {
1067 ConstantSDNode *LHS = dyn_cast<ConstantSDNode>(N.getOperand(0));
1068 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
1069 unsigned LHSC = LHS ? LHS->getZExtValue() : 0;
1070 unsigned RHSC = RHS ? RHS->getZExtValue() : 0;
1072 // Thumb does not have [sp, #imm5] address mode for non-zero imm5.
1073 if (LHSC != 0 || RHSC != 0) return false;
1076 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1080 // If the RHS is + imm5 * scale, fold into addr mode.
1082 if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
1083 Base = N.getOperand(0);
1084 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1088 Base = N.getOperand(0);
1089 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1094 ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
1096 return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
1100 ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
1102 return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
1106 ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
1108 return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
1111 bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
1112 SDValue &Base, SDValue &OffImm) {
1113 if (N.getOpcode() == ISD::FrameIndex) {
1114 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1115 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
1116 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1120 if (!CurDAG->isBaseWithConstantOffset(N))
1123 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
1124 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
1125 (LHSR && LHSR->getReg() == ARM::SP)) {
1126 // If the RHS is + imm8 * scale, fold into addr mode.
1128 if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
1129 Base = N.getOperand(0);
1130 if (Base.getOpcode() == ISD::FrameIndex) {
1131 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1132 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
1134 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1143 //===----------------------------------------------------------------------===//
1144 // Thumb 2 Addressing Modes
1145 //===----------------------------------------------------------------------===//
1148 bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
1150 if (DisableShifterOp)
1153 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
1155 // Don't match base register only case. That is matched to a separate
1156 // lower complexity pattern with explicit register operand.
1157 if (ShOpcVal == ARM_AM::no_shift) return false;
1159 BaseReg = N.getOperand(0);
1160 unsigned ShImmVal = 0;
1161 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1162 ShImmVal = RHS->getZExtValue() & 31;
1163 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
1170 bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
1171 SDValue &Base, SDValue &OffImm) {
1172 // Match simple R + imm12 operands.
1175 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1176 !CurDAG->isBaseWithConstantOffset(N)) {
1177 if (N.getOpcode() == ISD::FrameIndex) {
1178 // Match frame index.
1179 int FI = cast<FrameIndexSDNode>(N)->getIndex();
1180 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
1181 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1185 if (N.getOpcode() == ARMISD::Wrapper &&
1186 !(Subtarget->useMovt() &&
1187 N.getOperand(0).getOpcode() == ISD::TargetGlobalAddress)) {
1188 Base = N.getOperand(0);
1189 if (Base.getOpcode() == ISD::TargetConstantPool)
1190 return false; // We want to select t2LDRpci instead.
1193 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1197 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1198 if (SelectT2AddrModeImm8(N, Base, OffImm))
1199 // Let t2LDRi8 handle (R - imm8).
1202 int RHSC = (int)RHS->getZExtValue();
1203 if (N.getOpcode() == ISD::SUB)
1206 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
1207 Base = N.getOperand(0);
1208 if (Base.getOpcode() == ISD::FrameIndex) {
1209 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1210 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
1212 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1219 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
1223 bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
1224 SDValue &Base, SDValue &OffImm) {
1225 // Match simple R - imm8 operands.
1226 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
1227 !CurDAG->isBaseWithConstantOffset(N))
1230 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1231 int RHSC = (int)RHS->getSExtValue();
1232 if (N.getOpcode() == ISD::SUB)
1235 if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
1236 Base = N.getOperand(0);
1237 if (Base.getOpcode() == ISD::FrameIndex) {
1238 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
1239 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
1241 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
1249 bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
1251 unsigned Opcode = Op->getOpcode();
1252 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
1253 ? cast<LoadSDNode>(Op)->getAddressingMode()
1254 : cast<StoreSDNode>(Op)->getAddressingMode();
1256 if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
1257 OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
1258 ? CurDAG->getTargetConstant(RHSC, MVT::i32)
1259 : CurDAG->getTargetConstant(-RHSC, MVT::i32);
1266 bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
1268 SDValue &OffReg, SDValue &ShImm) {
1269 // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
1270 if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
1273 // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
1274 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
1275 int RHSC = (int)RHS->getZExtValue();
1276 if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
1278 else if (RHSC < 0 && RHSC >= -255) // 8 bits
1282 // Look for (R + R) or (R + (R << [1,2,3])).
1284 Base = N.getOperand(0);
1285 OffReg = N.getOperand(1);
1287 // Swap if it is ((R << c) + R).
1288 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
1289 if (ShOpcVal != ARM_AM::lsl) {
1290 ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
1291 if (ShOpcVal == ARM_AM::lsl)
1292 std::swap(Base, OffReg);
1295 if (ShOpcVal == ARM_AM::lsl) {
// Check to see if the RHS of the shift is a constant; if not, we can't fold
// it.
1298 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
1299 ShAmt = Sh->getZExtValue();
1300 if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
1301 OffReg = OffReg.getOperand(0);
1304 ShOpcVal = ARM_AM::no_shift;
1307 ShOpcVal = ARM_AM::no_shift;
1311 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
1316 //===--------------------------------------------------------------------===//
/// getAL - Returns an ARMCC::AL immediate node.
1319 static inline SDValue getAL(SelectionDAG *CurDAG) {
1320 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
1323 SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
1324 LoadSDNode *LD = cast<LoadSDNode>(N);
1325 ISD::MemIndexedMode AM = LD->getAddressingMode();
1326 if (AM == ISD::UNINDEXED)
1329 EVT LoadedVT = LD->getMemoryVT();
1330 SDValue Offset, AMOpc;
1331 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1332 unsigned Opcode = 0;
1334 if (LoadedVT == MVT::i32 && isPre &&
1335 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1336 Opcode = ARM::LDR_PRE_IMM;
1338 } else if (LoadedVT == MVT::i32 && !isPre &&
1339 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1340 Opcode = ARM::LDR_POST_IMM;
1342 } else if (LoadedVT == MVT::i32 &&
1343 SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1344 Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
1347 } else if (LoadedVT == MVT::i16 &&
1348 SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1350 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
1351 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
1352 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
1353 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
1354 if (LD->getExtensionType() == ISD::SEXTLOAD) {
1355 if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
1357 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
1361 SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
1363 Opcode = ARM::LDRB_PRE_IMM;
1364 } else if (!isPre &&
1365 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
1367 Opcode = ARM::LDRB_POST_IMM;
1368 } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
1370 Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
1376 if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
1377 SDValue Chain = LD->getChain();
1378 SDValue Base = LD->getBasePtr();
1379 SDValue Ops[]= { Base, AMOpc, getAL(CurDAG),
1380 CurDAG->getRegister(0, MVT::i32), Chain };
1381 return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
1382 MVT::i32, MVT::Other, Ops, 5);
1384 SDValue Chain = LD->getChain();
1385 SDValue Base = LD->getBasePtr();
1386 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
1387 CurDAG->getRegister(0, MVT::i32), Chain };
1388 return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32,
1389 MVT::i32, MVT::Other, Ops, 6);
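// Illustrative note (a sketch, not from the original source): the two machine
// node shapes built above correspond to pre- and post-indexed loads such as
//   ldr r0, [r1, #4]!   ; pre-indexed:  r1 is updated before the load
//   ldr r0, [r1], #4    ; post-indexed: r1 is updated after the load
// where the second MVT::i32 result carries the updated base register.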
1396 SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
1397 LoadSDNode *LD = cast<LoadSDNode>(N);
1398 ISD::MemIndexedMode AM = LD->getAddressingMode();
1399 if (AM == ISD::UNINDEXED)
1402 EVT LoadedVT = LD->getMemoryVT();
1403 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
1405 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
1406 unsigned Opcode = 0;
1408 if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
1409 switch (LoadedVT.getSimpleVT().SimpleTy) {
1411 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
1415 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
1417 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
1422 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
1424 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
1433 SDValue Chain = LD->getChain();
1434 SDValue Base = LD->getBasePtr();
1435 SDValue Ops[]= { Base, Offset, getAL(CurDAG),
1436 CurDAG->getRegister(0, MVT::i32), Chain };
1437 return CurDAG->getMachineNode(Opcode, N->getDebugLoc(), MVT::i32, MVT::i32,
1438 MVT::Other, Ops, 5);
1444 /// PairSRegs - Form a D register from a pair of S registers.
1446 SDNode *ARMDAGToDAGISel::PairSRegs(EVT VT, SDValue V0, SDValue V1) {
1447 DebugLoc dl = V0.getNode()->getDebugLoc();
1449 CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, MVT::i32);
1450 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1451 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1452 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1453 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
1456 /// PairDRegs - Form a quad register from a pair of D registers.
1458 SDNode *ARMDAGToDAGISel::PairDRegs(EVT VT, SDValue V0, SDValue V1) {
1459 DebugLoc dl = V0.getNode()->getDebugLoc();
1460 SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, MVT::i32);
1461 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1462 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1463 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1464 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
1467 /// PairQRegs - Form 4 consecutive D registers from a pair of Q registers.
1469 SDNode *ARMDAGToDAGISel::PairQRegs(EVT VT, SDValue V0, SDValue V1) {
1470 DebugLoc dl = V0.getNode()->getDebugLoc();
1471 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1472 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1473 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1474 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
1475 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 5);
1478 /// QuadSRegs - Form 4 consecutive S registers.
1480 SDNode *ARMDAGToDAGISel::QuadSRegs(EVT VT, SDValue V0, SDValue V1,
1481 SDValue V2, SDValue V3) {
1482 DebugLoc dl = V0.getNode()->getDebugLoc();
1484 CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, MVT::i32);
1485 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, MVT::i32);
1486 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, MVT::i32);
1487 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, MVT::i32);
1488 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, MVT::i32);
1489 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1490 V2, SubReg2, V3, SubReg3 };
1491 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
1494 /// QuadDRegs - Form 4 consecutive D registers.
1496 SDNode *ARMDAGToDAGISel::QuadDRegs(EVT VT, SDValue V0, SDValue V1,
1497 SDValue V2, SDValue V3) {
1498 DebugLoc dl = V0.getNode()->getDebugLoc();
1499 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, MVT::i32);
1500 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, MVT::i32);
1501 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, MVT::i32);
1502 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, MVT::i32);
1503 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, MVT::i32);
1504 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1505 V2, SubReg2, V3, SubReg3 };
1506 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
1509 /// QuadQRegs - Form 4 consecutive Q registers.
1511 SDNode *ARMDAGToDAGISel::QuadQRegs(EVT VT, SDValue V0, SDValue V1,
1512 SDValue V2, SDValue V3) {
1513 DebugLoc dl = V0.getNode()->getDebugLoc();
1514 SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, MVT::i32);
1515 SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, MVT::i32);
1516 SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, MVT::i32);
1517 SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, MVT::i32);
1518 SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, MVT::i32);
1519 const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
1520 V2, SubReg2, V3, SubReg3 };
1521 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops, 9);
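// Illustrative note (a sketch, not from the original source): each helper
// above builds a single REG_SEQUENCE node, e.g.
//   SDValue Pair = SDValue(PairDRegs(MVT::v16i8, D0, D1), 0);
// yields a Q register whose dsub_0/dsub_1 subregisters are D0 and D1; this is
// how the NEON VLD/VST/VTBL selection code forces consecutive registers.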
1524 /// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
1525 /// of a NEON VLD or VST instruction. The supported values depend on the
1526 /// number of registers being loaded.
1527 SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, unsigned NumVecs,
1528 bool is64BitVector) {
1529 unsigned NumRegs = NumVecs;
1530 if (!is64BitVector && NumVecs < 3)
1533 unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1534 if (Alignment >= 32 && NumRegs == 4)
1536 else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
1538 else if (Alignment >= 8)
1543 return CurDAG->getTargetConstant(Alignment, MVT::i32);
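// Illustrative note (a sketch, not from the original source): the clamping
// above maps the requested alignment onto what the encodings support, e.g. a
// 32-byte-aligned two-D-register VLD2 is emitted with the 16-byte alignment
// encoding (the largest that form supports), and anything below 8 bytes is
// encoded as 0, meaning "no alignment specified".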
1546 // Get the register stride update opcode of a VLD/VST instruction that
1547 // is otherwise equivalent to the given fixed stride updating instruction.
1548 static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
1551 case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
1552 case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
1553 case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
1554 case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
1555 case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
1556 case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
1557 case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
1558 case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
1560 case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
1561 case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
1562 case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
1563 case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
1564 case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
1565 case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
1566 case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
1567 case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
1568 case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
1569 case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;
1571 case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
1572 case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
1573 case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
1574 case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
1575 case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
1576 case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;
1578 case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
1579 case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
1580 case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
1581 case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
1582 case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
1583 case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;
1585 case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
1586 case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
1587 case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
1589 return Opc; // If not one we handle, return it unchanged.
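// Illustrative note (a sketch, not from the original source): the "_fixed"
// writeback forms post-increment the base by the access size, while the
// "_register" forms add an arbitrary register, e.g.
//   vld1.8 {d0}, [r0]!      ; VLD1d8wb_fixed    - base advances by 8 bytes
//   vld1.8 {d0}, [r0], r1   ; VLD1d8wb_register - base advances by r1
// SelectVLD/SelectVST switch to the register form when the increment operand
// is not the expected constant.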
1592 SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
1593 const uint16_t *DOpcodes,
1594 const uint16_t *QOpcodes0,
1595 const uint16_t *QOpcodes1) {
1596 assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
1597 DebugLoc dl = N->getDebugLoc();
1599 SDValue MemAddr, Align;
1600 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1601 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1604 SDValue Chain = N->getOperand(0);
1605 EVT VT = N->getValueType(0);
1606 bool is64BitVector = VT.is64BitVector();
1607 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1609 unsigned OpcodeIndex;
1610 switch (VT.getSimpleVT().SimpleTy) {
1611 default: llvm_unreachable("unhandled vld type");
1612 // Double-register operations:
1613 case MVT::v8i8: OpcodeIndex = 0; break;
1614 case MVT::v4i16: OpcodeIndex = 1; break;
1616 case MVT::v2i32: OpcodeIndex = 2; break;
1617 case MVT::v1i64: OpcodeIndex = 3; break;
1618 // Quad-register operations:
1619 case MVT::v16i8: OpcodeIndex = 0; break;
1620 case MVT::v8i16: OpcodeIndex = 1; break;
1622 case MVT::v4i32: OpcodeIndex = 2; break;
1623 case MVT::v2i64: OpcodeIndex = 3;
1624 assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
1632 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
1635 ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
1637 std::vector<EVT> ResTys;
1638 ResTys.push_back(ResTy);
1640 ResTys.push_back(MVT::i32);
1641 ResTys.push_back(MVT::Other);
1643 SDValue Pred = getAL(CurDAG);
1644 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1646 SmallVector<SDValue, 7> Ops;
1648 // Double registers and VLD1/VLD2 quad registers are directly supported.
1649 if (is64BitVector || NumVecs <= 2) {
1650 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1651 QOpcodes0[OpcodeIndex]);
1652 Ops.push_back(MemAddr);
1653 Ops.push_back(Align);
1655 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1656 // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
1657 // case entirely when the rest are updated to that form, too.
1658 if ((NumVecs == 1 || NumVecs == 2) && !isa<ConstantSDNode>(Inc.getNode()))
1659 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1660 // We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
1661 // check for that explicitly too. Horribly hacky, but temporary.
1662 if ((NumVecs != 1 && NumVecs != 2 && Opc != ARM::VLD1q64wb_fixed) ||
1663 !isa<ConstantSDNode>(Inc.getNode()))
1664 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1666 Ops.push_back(Pred);
1667 Ops.push_back(Reg0);
1668 Ops.push_back(Chain);
1669 VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
1672 // Otherwise, quad registers are loaded with two separate instructions,
1673 // where one loads the even registers and the other loads the odd registers.
1674 EVT AddrTy = MemAddr.getValueType();
1676 // Load the even subregs. This is always an updating load, so that it
1677 // provides the address to the second load for the odd subregs.
1679 SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
1680 const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
1681 SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1682 ResTy, AddrTy, MVT::Other, OpsA, 7);
1683 Chain = SDValue(VLdA, 2);
1685 // Load the odd subregs.
1686 Ops.push_back(SDValue(VLdA, 1));
1687 Ops.push_back(Align);
1689 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1690 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1691 "only constant post-increment update allowed for VLD3/4");
1693 Ops.push_back(Reg0);
1695 Ops.push_back(SDValue(VLdA, 0));
1696 Ops.push_back(Pred);
1697 Ops.push_back(Reg0);
1698 Ops.push_back(Chain);
1699 VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
1700 Ops.data(), Ops.size());
1703 // Transfer memoperands.
1704 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1705 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1706 cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
1711 // Extract out the subregisters.
1712 SDValue SuperReg = SDValue(VLd, 0);
1713 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1714 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1715 unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
1716 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1717 ReplaceUses(SDValue(N, Vec),
1718 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
1719 ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
1721 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
1725 SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
1726 const uint16_t *DOpcodes,
1727 const uint16_t *QOpcodes0,
1728 const uint16_t *QOpcodes1) {
1729 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1730 DebugLoc dl = N->getDebugLoc();
1732 SDValue MemAddr, Align;
1733 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1734 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1735 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1738 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1739 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1741 SDValue Chain = N->getOperand(0);
1742 EVT VT = N->getOperand(Vec0Idx).getValueType();
1743 bool is64BitVector = VT.is64BitVector();
1744 Align = GetVLDSTAlign(Align, NumVecs, is64BitVector);
1746 unsigned OpcodeIndex;
1747 switch (VT.getSimpleVT().SimpleTy) {
1748 default: llvm_unreachable("unhandled vst type");
1749 // Double-register operations:
1750 case MVT::v8i8: OpcodeIndex = 0; break;
1751 case MVT::v4i16: OpcodeIndex = 1; break;
1753 case MVT::v2i32: OpcodeIndex = 2; break;
1754 case MVT::v1i64: OpcodeIndex = 3; break;
1755 // Quad-register operations:
1756 case MVT::v16i8: OpcodeIndex = 0; break;
1757 case MVT::v8i16: OpcodeIndex = 1; break;
1759 case MVT::v4i32: OpcodeIndex = 2; break;
1760 case MVT::v2i64: OpcodeIndex = 3;
1761 assert(NumVecs == 1 && "v2i64 type only supported for VST1");
1765 std::vector<EVT> ResTys;
1767 ResTys.push_back(MVT::i32);
1768 ResTys.push_back(MVT::Other);
1770 SDValue Pred = getAL(CurDAG);
1771 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1772 SmallVector<SDValue, 7> Ops;
1774 // Double registers and VST1/VST2 quad registers are directly supported.
1775 if (is64BitVector || NumVecs <= 2) {
1778 SrcReg = N->getOperand(Vec0Idx);
1779 } else if (is64BitVector) {
1780 // Form a REG_SEQUENCE to force register allocation.
1781 SDValue V0 = N->getOperand(Vec0Idx + 0);
1782 SDValue V1 = N->getOperand(Vec0Idx + 1);
1784 SrcReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
1786 SDValue V2 = N->getOperand(Vec0Idx + 2);
// If it's a vst3, form a quad D-register and leave the last part as
// an undef.
1789 SDValue V3 = (NumVecs == 3)
1790 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1791 : N->getOperand(Vec0Idx + 3);
1792 SrcReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
1795 // Form a QQ register.
1796 SDValue Q0 = N->getOperand(Vec0Idx);
1797 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1798 SrcReg = SDValue(PairQRegs(MVT::v4i64, Q0, Q1), 0);
1801 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1802 QOpcodes0[OpcodeIndex]);
1803 Ops.push_back(MemAddr);
1804 Ops.push_back(Align);
1806 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1807 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1808 // case entirely when the rest are updated to that form, too.
1809 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1810 Opc = getVLDSTRegisterUpdateOpcode(Opc);
// We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
1812 // check for that explicitly too. Horribly hacky, but temporary.
1813 if ((NumVecs > 2 && Opc != ARM::VST1q64wb_fixed) ||
1814 !isa<ConstantSDNode>(Inc.getNode()))
1815 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
1817 Ops.push_back(SrcReg);
1818 Ops.push_back(Pred);
1819 Ops.push_back(Reg0);
    Ops.push_back(Chain);
    SDNode *VSt =
      CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
1824 // Transfer memoperands.
    cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);

    return VSt;
  }

  // Otherwise, quad registers are stored with two separate instructions,
1831 // where one stores the even registers and the other stores the odd registers.
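  // (For a quad-register vst3, for instance, the {q0,q1,q2} source roughly
  // becomes one store of {d0,d2,d4} followed by one of {d1,d3,d5}.)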
1833 // Form the QQQQ REG_SEQUENCE.
1834 SDValue V0 = N->getOperand(Vec0Idx + 0);
1835 SDValue V1 = N->getOperand(Vec0Idx + 1);
1836 SDValue V2 = N->getOperand(Vec0Idx + 2);
1837 SDValue V3 = (NumVecs == 3)
1838 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1839 : N->getOperand(Vec0Idx + 3);
1840 SDValue RegSeq = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
1842 // Store the even D registers. This is always an updating store, so that it
1843 // provides the address to the second store for the odd subregs.
1844 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
1845 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1846 MemAddr.getValueType(),
1847 MVT::Other, OpsA, 7);
1848 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
1849 Chain = SDValue(VStA, 1);
1851 // Store the odd D registers.
1852 Ops.push_back(SDValue(VStA, 0));
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
1856 assert(isa<ConstantSDNode>(Inc.getNode()) &&
           "only constant post-increment update allowed for VST3/4");
    (void)Inc;
    Ops.push_back(Reg0);
  }
1861 Ops.push_back(RegSeq);
1862 Ops.push_back(Pred);
1863 Ops.push_back(Reg0);
1864 Ops.push_back(Chain);
1865 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
1866 Ops.data(), Ops.size());
  cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
  return VStB;
}
1871 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
1872 bool isUpdating, unsigned NumVecs,
1873 const uint16_t *DOpcodes,
1874 const uint16_t *QOpcodes) {
1875 assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
1876 DebugLoc dl = N->getDebugLoc();
1878 SDValue MemAddr, Align;
1879 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1880 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return NULL;
1884 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1885 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  SDValue Chain = N->getOperand(0);
  unsigned Lane =
    cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
  EVT VT = N->getOperand(Vec0Idx).getValueType();
1891 bool is64BitVector = VT.is64BitVector();
  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
1896 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
1897 if (Alignment > NumBytes)
1898 Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
1902 Alignment = (Alignment & -Alignment);
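    // (Keeping only the lowest set bit always yields a power of two,
    // e.g. 12 & -12 == 4.)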
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
1908 unsigned OpcodeIndex;
1909 switch (VT.getSimpleVT().SimpleTy) {
1910 default: llvm_unreachable("unhandled vld/vst lane type");
1911 // Double-register operations:
1912 case MVT::v8i8: OpcodeIndex = 0; break;
1913 case MVT::v4i16: OpcodeIndex = 1; break;
1915 case MVT::v2i32: OpcodeIndex = 2; break;
1916 // Quad-register operations:
1917 case MVT::v8i16: OpcodeIndex = 0; break;
  case MVT::v4i32: OpcodeIndex = 1; break;
  }

  std::vector<EVT> ResTys;
  if (IsLoad) {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
                                      MVT::i64, ResTyElts));
  }
  if (isUpdating)
    ResTys.push_back(MVT::i32);
1932 ResTys.push_back(MVT::Other);
1934 SDValue Pred = getAL(CurDAG);
1935 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1937 SmallVector<SDValue, 8> Ops;
1938 Ops.push_back(MemAddr);
  Ops.push_back(Align);
  if (isUpdating) {
    SDValue Inc = N->getOperand(AddrOpIdx + 1);
    Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
  }
  SDValue SuperReg;
  SDValue V0 = N->getOperand(Vec0Idx + 0);
  SDValue V1 = N->getOperand(Vec0Idx + 1);
  if (NumVecs == 2) {
    if (is64BitVector)
      SuperReg = SDValue(PairDRegs(MVT::v2i64, V0, V1), 0);
    else
      SuperReg = SDValue(PairQRegs(MVT::v4i64, V0, V1), 0);
  } else {
    SDValue V2 = N->getOperand(Vec0Idx + 2);
    SDValue V3 = (NumVecs == 3)
      ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
      : N->getOperand(Vec0Idx + 3);
    if (is64BitVector)
      SuperReg = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
    else
      SuperReg = SDValue(QuadQRegs(MVT::v8i64, V0, V1, V2, V3), 0);
  }
1963 Ops.push_back(SuperReg);
1964 Ops.push_back(getI32Imm(Lane));
1965 Ops.push_back(Pred);
1966 Ops.push_back(Reg0);
1967 Ops.push_back(Chain);
1969 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1970 QOpcodes[OpcodeIndex]);
1971 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys,
1972 Ops.data(), Ops.size());
  cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
  if (!IsLoad)
    return VLdLn;

  // Extract the subregisters.
  SuperReg = SDValue(VLdLn, 0);
1979 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
1980 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
1981 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
1982 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
1983 ReplaceUses(SDValue(N, Vec),
1984 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
  return NULL;
}
SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
                                      unsigned NumVecs,
                                      const uint16_t *Opcodes) {
1994 assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
1995 DebugLoc dl = N->getDebugLoc();
1997 SDValue MemAddr, Align;
  if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
    return NULL;
2001 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2002 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2004 SDValue Chain = N->getOperand(0);
2005 EVT VT = N->getValueType(0);
  unsigned Alignment = 0;
  if (NumVecs != 3) {
    Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2010 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2011 if (Alignment > NumBytes)
2012 Alignment = NumBytes;
    if (Alignment < 8 && Alignment < NumBytes)
      Alignment = 0;
    // Alignment must be a power of two; make sure of that.
2016 Alignment = (Alignment & -Alignment);
    if (Alignment == 1)
      Alignment = 0;
  }
  Align = CurDAG->getTargetConstant(Alignment, MVT::i32);
2022 unsigned OpcodeIndex;
2023 switch (VT.getSimpleVT().SimpleTy) {
2024 default: llvm_unreachable("unhandled vld-dup type");
2025 case MVT::v8i8: OpcodeIndex = 0; break;
2026 case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2i32: OpcodeIndex = 2; break;
  }
2031 SDValue Pred = getAL(CurDAG);
2032 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2034 unsigned Opc = Opcodes[OpcodeIndex];
2035 SmallVector<SDValue, 6> Ops;
2036 Ops.push_back(MemAddr);
  Ops.push_back(Align);
  if (isUpdating) {
    // fixed-stride update instructions don't have an explicit writeback
2040 // operand. It's implicit in the opcode itself.
2041 SDValue Inc = N->getOperand(2);
    if (!isa<ConstantSDNode>(Inc.getNode()))
      Ops.push_back(Inc);
    // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
    else if (NumVecs > 2)
      Ops.push_back(Reg0);
  }
2048 Ops.push_back(Pred);
2049 Ops.push_back(Reg0);
2050 Ops.push_back(Chain);
2052 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
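  // A 3-vector result is still returned in a 4-register-wide super-register
  // (register sequences presumably only come in 2- and 4-register sizes), so
  // round 3 up to 4; the unused subregister is simply never extracted.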
2053 std::vector<EVT> ResTys;
2054 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
2056 ResTys.push_back(MVT::i32);
2057 ResTys.push_back(MVT::Other);
  SDNode *VLdDup =
    CurDAG->getMachineNode(Opc, dl, ResTys, Ops.data(), Ops.size());
  cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
  SDValue SuperReg = SDValue(VLdDup, 0);
2063 // Extract the subregisters.
2064 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2065 unsigned SubIdx = ARM::dsub_0;
2066 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2067 ReplaceUses(SDValue(N, Vec),
2068 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
  return NULL;
}
SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
                                    unsigned Opc) {
  assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2078 DebugLoc dl = N->getDebugLoc();
2079 EVT VT = N->getValueType(0);
2080 unsigned FirstTblReg = IsExt ? 2 : 1;
  // Form a REG_SEQUENCE to force register allocation.
  SDValue RegSeq;
  SDValue V0 = N->getOperand(FirstTblReg + 0);
  SDValue V1 = N->getOperand(FirstTblReg + 1);
  if (NumVecs == 2)
    RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
  else {
2089 SDValue V2 = N->getOperand(FirstTblReg + 2);
    // If it's a vtbl3, form a quad D-register and leave the last part as
    // an undef.
    SDValue V3 = (NumVecs == 3)
2093 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2094 : N->getOperand(FirstTblReg + 3);
    RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
  }

  SmallVector<SDValue, 6> Ops;
  if (IsExt)
    Ops.push_back(N->getOperand(1));
2101 Ops.push_back(RegSeq);
2102 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2103 Ops.push_back(getAL(CurDAG)); // predicate
2104 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
  return CurDAG->getMachineNode(Opc, dl, VT, Ops.data(), Ops.size());
}
SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
                                                     bool isSigned) {
  if (!Subtarget->hasV6T2Ops())
    return NULL;
2113 unsigned Opc = isSigned ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2114 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2117 // For unsigned extracts, check for a shift right and mask
2118 unsigned And_imm = 0;
2119 if (N->getOpcode() == ISD::AND) {
2120 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2122 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
      if (And_imm & (And_imm + 1))
        return NULL;
2126 unsigned Srl_imm = 0;
      if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
                                Srl_imm)) {
        assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2131 // Note: The width operand is encoded as width-1.
2132 unsigned Width = CountTrailingOnes_32(And_imm) - 1;
2133 unsigned LSB = Srl_imm;
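        // e.g. for (x >> 4) & 0xff: And_imm = 0xff is a low-bit mask
        // (0xff & 0x100 == 0), Width = 8 - 1 = 7 and LSB = 4, which
        // selects to UBFX x, #4, #8.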
2134 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2135 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2136 CurDAG->getTargetConstant(LSB, MVT::i32),
2137 CurDAG->getTargetConstant(Width, MVT::i32),
2138 getAL(CurDAG), Reg0 };
        return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
      }
    }
    return NULL;
  }

  // Otherwise, we're looking for a shift of a shift
2146 unsigned Shl_imm = 0;
2147 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2148 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2149 unsigned Srl_imm = 0;
2150 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2151 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2152 // Note: The width operand is encoded as width-1.
2153 unsigned Width = 32 - Srl_imm - 1;
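      // e.g. (x << 8) >> 20 keeps the 12-bit field at bits [23:12] of x:
      // LSB = 20 - 8 = 12 and the encoded width is 32 - 20 - 1 = 11.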
      int LSB = Srl_imm - Shl_imm;
      if (LSB < 0)
        return NULL;

      SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2158 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2159 CurDAG->getTargetConstant(LSB, MVT::i32),
2160 CurDAG->getTargetConstant(Width, MVT::i32),
2161 getAL(CurDAG), Reg0 };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  return NULL;
}
2168 SDNode *ARMDAGToDAGISel::
2169 SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                    ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
2174 unsigned SOVal = cast<ConstantSDNode>(CPTmp1)->getZExtValue();
    unsigned SOShOp = ARM_AM::getSORegShOp(SOVal);
    unsigned Opc = 0;
    switch (SOShOp) {
    case ARM_AM::lsl: Opc = ARM::t2MOVCClsl; break;
2179 case ARM_AM::lsr: Opc = ARM::t2MOVCClsr; break;
2180 case ARM_AM::asr: Opc = ARM::t2MOVCCasr; break;
    case ARM_AM::ror: Opc = ARM::t2MOVCCror; break;
    default:
      llvm_unreachable("Unknown so_reg opcode!");
    }
    SDValue SOShImm =
      CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
2187 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2188 SDValue Ops[] = { FalseVal, CPTmp0, SOShImm, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 6);
  }
  return 0;
}
2194 SDNode *ARMDAGToDAGISel::
2195 SelectARMCMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
                     ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  SDValue CPTmp0;
  SDValue CPTmp1;
  SDValue CPTmp2;
  if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
2201 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2202 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp2, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, ARM::MOVCCsi, MVT::i32, Ops, 6);
  }

  if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
2207 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2208 SDValue Ops[] = { FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, ARM::MOVCCsr, MVT::i32, Ops, 7);
  }
  return 0;
}
2214 SDNode *ARMDAGToDAGISel::
2215 SelectT2CMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2216 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  unsigned Opc = 0;
  unsigned TrueImm = T->getZExtValue();
2223 if (is_t2_so_imm(TrueImm)) {
2224 Opc = ARM::t2MOVCCi;
2225 } else if (TrueImm <= 0xffff) {
2226 Opc = ARM::t2MOVCCi16;
  } else if (is_t2_so_imm_not(TrueImm)) {
    TrueImm = ~TrueImm;
    Opc = ARM::t2MVNCCi;
  } else if (TrueVal.getNode()->hasOneUse() && Subtarget->hasV6T2Ops()) {
    // Large immediate.
    Opc = ARM::t2MOVCCi32imm;
  }

  if (Opc) {
2236 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2237 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2238 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
  }

  return 0;
}
2245 SDNode *ARMDAGToDAGISel::
2246 SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
2247 ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag) {
  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (!T)
    return 0;

  unsigned Opc = 0;
  unsigned TrueImm = T->getZExtValue();
  bool isSoImm = is_so_imm(TrueImm);
  if (isSoImm) {
    Opc = ARM::MOVCCi;
  } else if (Subtarget->hasV6T2Ops() && TrueImm <= 0xffff) {
2258 Opc = ARM::MOVCCi16;
  } else if (is_so_imm_not(TrueImm)) {
    TrueImm = ~TrueImm;
    Opc = ARM::MVNCCi;
  } else if (TrueVal.getNode()->hasOneUse() &&
             (Subtarget->hasV6T2Ops() || ARM_AM::isSOImmTwoPartVal(TrueImm))) {
    // Large immediate.
    Opc = ARM::MOVCCi32imm;
  }

  if (Opc) {
2269 SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
2270 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2271 SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
  }

  return 0;
}
2278 SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) {
2279 EVT VT = N->getValueType(0);
2280 SDValue FalseVal = N->getOperand(0);
2281 SDValue TrueVal = N->getOperand(1);
2282 SDValue CC = N->getOperand(2);
2283 SDValue CCR = N->getOperand(3);
2284 SDValue InFlag = N->getOperand(4);
2285 assert(CC.getOpcode() == ISD::Constant);
2286 assert(CCR.getOpcode() == ISD::Register);
2287 ARMCC::CondCodes CCVal =
2288 (ARMCC::CondCodes)cast<ConstantSDNode>(CC)->getZExtValue();
2290 if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
2291 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2292 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
2293 // Pattern complexity = 18 cost = 1 size = 0
2294 if (Subtarget->isThumb()) {
2295 SDNode *Res = SelectT2CMOVShiftOp(N, FalseVal, TrueVal,
                                        CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVShiftOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVShiftOp(N, FalseVal, TrueVal,
                                         CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVShiftOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }

    // Pattern: (ARMcmov:i32 GPR:i32:$false,
2313 // (imm:i32)<<P:Pred_so_imm>>:$true,
2315 // Emits: (MOVCCi:i32 GPR:i32:$false,
2316 // (so_imm:i32 (imm:i32):$true), (imm:i32):$cc)
2317 // Pattern complexity = 10 cost = 1 size = 0
2318 if (Subtarget->isThumb()) {
2319 SDNode *Res = SelectT2CMOVImmOp(N, FalseVal, TrueVal,
                                      CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectT2CMOVImmOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    } else {
      SDNode *Res = SelectARMCMOVImmOp(N, FalseVal, TrueVal,
                                       CCVal, CCR, InFlag);
      if (!Res)
        Res = SelectARMCMOVImmOp(N, TrueVal, FalseVal,
                               ARMCC::getOppositeCondition(CCVal), CCR, InFlag);
      if (Res)
        return Res;
    }
  }

  // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2338 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2339 // Pattern complexity = 6 cost = 1 size = 0
2341 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2342 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
2343 // Pattern complexity = 6 cost = 11 size = 0
2345 // Also VMOVScc and VMOVDcc.
  SDValue Tmp2 = CurDAG->getTargetConstant(CCVal, MVT::i32);
  SDValue Ops[] = { FalseVal, TrueVal, Tmp2, CCR, InFlag };
  unsigned Opc = 0;
  switch (VT.getSimpleVT().SimpleTy) {
2350 default: llvm_unreachable("Illegal conditional move type!");
  case MVT::i32:
    Opc = Subtarget->isThumb()
      ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr_pseudo)
      : ARM::MOVCCr;
    break;
  case MVT::f32:
    Opc = ARM::VMOVScc;
    break;
  case MVT::f64:
    Opc = ARM::VMOVDcc;
    break;
  }
  return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5);
}
2366 SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) {
2367 SDValue FalseVal = N->getOperand(0);
2368 SDValue TrueVal = N->getOperand(1);
2369 ARMCC::CondCodes CCVal =
2370 (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
2371 SDValue CCR = N->getOperand(3);
2372 assert(CCR.getOpcode() == ISD::Register);
2373 SDValue InFlag = N->getOperand(4);
2374 SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32);
2375 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  if (Subtarget->isThumb()) {
    SDValue CPTmp0;
    SDValue CPTmp1;
    if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) {
      unsigned Opc;
      switch (N->getOpcode()) {
2383 default: llvm_unreachable("Unexpected node");
2384 case ARMISD::CAND: Opc = ARM::t2ANDCCrs; break;
2385 case ARMISD::COR: Opc = ARM::t2ORRCCrs; break;
      case ARMISD::CXOR: Opc = ARM::t2EORCCrs; break;
      }
      SDValue Ops[] = {
        FalseVal, FalseVal, CPTmp0, CPTmp1, CC, CCR, Reg0, InFlag
      };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
    }
    ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
    if (T) {
      unsigned TrueImm = T->getZExtValue();
      if (is_t2_so_imm(TrueImm)) {
        unsigned Opc;
        switch (N->getOpcode()) {
2400 default: llvm_unreachable("Unexpected node");
2401 case ARMISD::CAND: Opc = ARM::t2ANDCCri; break;
2402 case ARMISD::COR: Opc = ARM::t2ORRCCri; break;
        case ARMISD::CXOR: Opc = ARM::t2EORCCri; break;
        }
        SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
        SDValue Ops[] = { FalseVal, FalseVal, True, CC, CCR, Reg0, InFlag };
        return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
      }
    }

    unsigned Opc;
    switch (N->getOpcode()) {
2413 default: llvm_unreachable("Unexpected node");
2414 case ARMISD::CAND: Opc = ARM::t2ANDCCrr; break;
2415 case ARMISD::COR: Opc = ARM::t2ORRCCrr; break;
    case ARMISD::CXOR: Opc = ARM::t2EORCCrr; break;
    }
    SDValue Ops[] = { FalseVal, FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
  }

  SDValue CPTmp0;
  SDValue CPTmp1;
  SDValue CPTmp2;
  if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) {
    unsigned Opc;
    switch (N->getOpcode()) {
2428 default: llvm_unreachable("Unexpected node");
2429 case ARMISD::CAND: Opc = ARM::ANDCCrsi; break;
2430 case ARMISD::COR: Opc = ARM::ORRCCrsi; break;
    case ARMISD::CXOR: Opc = ARM::EORCCrsi; break;
    }
    SDValue Ops[] = {
      FalseVal, FalseVal, CPTmp0, CPTmp2, CC, CCR, Reg0, InFlag
    };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8);
  }

  if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) {
    unsigned Opc;
    switch (N->getOpcode()) {
2442 default: llvm_unreachable("Unexpected node");
2443 case ARMISD::CAND: Opc = ARM::ANDCCrsr; break;
2444 case ARMISD::COR: Opc = ARM::ORRCCrsr; break;
    case ARMISD::CXOR: Opc = ARM::EORCCrsr; break;
    }
    SDValue Ops[] = {
      FalseVal, FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, Reg0, InFlag
    };
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 9);
  }

  ConstantSDNode *T = dyn_cast<ConstantSDNode>(TrueVal);
  if (T) {
    unsigned TrueImm = T->getZExtValue();
    if (is_so_imm(TrueImm)) {
      unsigned Opc;
      switch (N->getOpcode()) {
2459 default: llvm_unreachable("Unexpected node");
2460 case ARMISD::CAND: Opc = ARM::ANDCCri; break;
2461 case ARMISD::COR: Opc = ARM::ORRCCri; break;
      case ARMISD::CXOR: Opc = ARM::EORCCri; break;
      }
      SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32);
      SDValue Ops[] = { FalseVal, FalseVal, True, CC, CCR, Reg0, InFlag };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
    }
  }

  unsigned Opc;
  switch (N->getOpcode()) {
2472 default: llvm_unreachable("Unexpected node");
2473 case ARMISD::CAND: Opc = ARM::ANDCCrr; break;
2474 case ARMISD::COR: Opc = ARM::ORRCCrr; break;
  case ARMISD::CXOR: Opc = ARM::EORCCrr; break;
  }
  SDValue Ops[] = { FalseVal, FalseVal, TrueVal, CC, CCR, Reg0, InFlag };
  return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7);
}
2481 /// Target-specific DAG combining for ISD::XOR.
2482 /// Target-independent combining lowers SELECT_CC nodes of the form
2483 /// select_cc setg[ge] X, 0, X, -X
2484 /// select_cc setgt X, -1, X, -X
2485 /// select_cc setl[te] X, 0, -X, X
2486 /// select_cc setlt X, 1, -X, X
2487 /// which represent Integer ABS into:
2488 /// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2489 /// ARM instruction selection detects the latter and matches it to
2490 /// ARM::ABS or ARM::t2ABS machine node.
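/// For instance, with X = -5: Y = -5 >> 31 = -1, and (X + Y) ^ Y
/// = (-6) ^ (-1) = 5.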
2491 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
2492 SDValue XORSrc0 = N->getOperand(0);
2493 SDValue XORSrc1 = N->getOperand(1);
2494 EVT VT = N->getValueType(0);
  if (Subtarget->isThumb1Only())
    return NULL;

  if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
    return NULL;
2502 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2503 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2504 SDValue SRASrc0 = XORSrc1.getOperand(0);
2505 SDValue SRASrc1 = XORSrc1.getOperand(1);
2506 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2507 EVT XType = SRASrc0.getValueType();
2508 unsigned Size = XType.getSizeInBits() - 1;
2510 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
2511 XType.isInteger() && SRAConstant != NULL &&
2512 Size == SRAConstant->getZExtValue()) {
2513 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
    return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
  }

  return NULL;
}
2520 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2521 // The only time a CONCAT_VECTORS operation can have legal types is when
2522 // two 64-bit vectors are concatenated to a 128-bit vector.
2523 EVT VT = N->getValueType(0);
2524 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2525 llvm_unreachable("unexpected CONCAT_VECTORS");
  return PairDRegs(VT, N->getOperand(0), N->getOperand(1));
}
2529 SDNode *ARMDAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
2530 SmallVector<SDValue, 6> Ops;
2531 Ops.push_back(Node->getOperand(1)); // Ptr
2532 Ops.push_back(Node->getOperand(2)); // Low part of Val1
2533 Ops.push_back(Node->getOperand(3)); // High part of Val1
2534 if (Opc == ARM::ATOMCMPXCHG6432) {
2535 Ops.push_back(Node->getOperand(4)); // Low part of Val2
    Ops.push_back(Node->getOperand(5)); // High part of Val2
  }
  Ops.push_back(Node->getOperand(0)); // Chain
2539 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2540 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
2541 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
2542 MVT::i32, MVT::i32, MVT::Other,
                                           Ops.data(), Ops.size());
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
2548 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2549 DebugLoc dl = N->getDebugLoc();
2551 if (N->isMachineOpcode())
2552 return NULL; // Already selected.
  switch (N->getOpcode()) {
  default: break;
  case ISD::XOR: {
    // Select special operations if XOR node forms integer ABS pattern
    SDNode *ResNode = SelectABSOp(N);
    if (ResNode)
      return ResNode;
    // Other cases are autogenerated.
    break;
  }
  case ISD::Constant: {
    unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
    bool UseCP = true;
2567 if (Subtarget->hasThumb2())
2568 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2569 // be done with MOV + MOVT, at worst.
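      // (e.g. 0x12345678 becomes movw r, #0x5678; movt r, #0x1234).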
      UseCP = false;
    else {
      if (Subtarget->isThumb()) {
        UseCP = (Val > 255 &&                                  // MOV
2574 ~Val > 255 && // MOV + MVN
                 !ARM_AM::isThumbImmShiftedVal(Val));          // MOV + LSL
      } else
        UseCP = (ARM_AM::getSOImmVal(Val) == -1 &&             // MOV
2578 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
                 !ARM_AM::isSOImmTwoPartVal(Val));             // two instrs.
    }

    if (UseCP) {
      SDValue CPIdx =
        CurDAG->getTargetConstantPool(ConstantInt::get(
2585 Type::getInt32Ty(*CurDAG->getContext()), Val),
                                      TLI.getPointerTy());

      SDNode *ResNode;
      if (Subtarget->isThumb1Only()) {
2590 SDValue Pred = getAL(CurDAG);
2591 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2592 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
        ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
                                         Ops, 4);
      } else {
        SDValue Ops[] = {
          CPIdx,
          CurDAG->getTargetConstant(0, MVT::i32),
2600 CurDAG->getRegister(0, MVT::i32),
          CurDAG->getEntryNode()
        };
        ResNode = CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
                                         Ops, 5);
      }
      ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
      return NULL;
    }

    // Other cases are autogenerated.
    break;
  }
2613 case ISD::FrameIndex: {
2614 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2615 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2616 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
2617 if (Subtarget->isThumb1Only()) {
2618 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2619 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, Ops, 4);
    } else {
      unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2623 ARM::t2ADDri : ARM::ADDri);
2624 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
2625 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2626 CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
    }
  }
  case ISD::SRL:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;
    break;
  case ISD::SRA:
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
      return I;
    break;
  case ISD::MUL:
    if (Subtarget->isThumb1Only())
      break;
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2642 unsigned RHSV = C->getZExtValue();
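      // Multiplies by 2^n+1 or 2^n-1 can be selected as a single add/rsb
      // with a shifted operand: e.g. x*9 = x + (x << 3), x*7 = (x << 3) - x.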
      if (!RHSV) break;
      if (isPowerOf2_32(RHSV-1)) {  // 2^n+1?
        unsigned ShImm = Log2_32(RHSV-1);
        if (ShImm >= 32)
          break;
        SDValue V = N->getOperand(0);
2649 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2650 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2651 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2652 if (Subtarget->isThumb()) {
2653 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops, 7);
        }
      }
      if (isPowerOf2_32(RHSV+1)) {  // 2^n-1?
        unsigned ShImm = Log2_32(RHSV+1);
        if (ShImm >= 32)
          break;
2664 SDValue V = N->getOperand(0);
2665 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2666 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
2667 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2668 if (Subtarget->isThumb()) {
2669 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 6);
        } else {
          SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
          return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops, 7);
        }
      }
    }
    break;
  case ISD::AND: {
    // Check for unsigned bitfield extract
    if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
      return I;

    // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
2684 // of c1 are 0xffff, and lower 16-bit of c2 are 0. That is, the top 16-bits
2685 // are entirely contributed by c2 and lower 16-bits are entirely contributed
2686 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
    // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
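    // e.g. with c1 = 0x1234ffff and c2 = 0x12340000 this keeps the low 16
    // bits of x and becomes "movt x, #0x1234".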
    EVT VT = N->getValueType(0);
    if (VT != MVT::i32)
      break;
    unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
      ? ARM::t2MOVTi16
      : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
    if (!Opc)
      break;
2696 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    if (!N1C)
      break;
    if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2701 SDValue N2 = N0.getOperand(1);
      ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
      if (!N2C)
        break;
      unsigned N1CVal = N1C->getZExtValue();
2706 unsigned N2CVal = N2C->getZExtValue();
2707 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2708 (N1CVal & 0xffffU) == 0xffffU &&
2709 (N2CVal & 0xffffU) == 0x0U) {
        SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
                                                  MVT::i32);
        SDValue Ops[] = { N0.getOperand(0), Imm16,
                          getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
        return CurDAG->getMachineNode(Opc, dl, VT, Ops, 4);
      }
    }
    break;
  }
2719 case ARMISD::VMOVRRD:
2720 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2721 N->getOperand(0), getAL(CurDAG),
2722 CurDAG->getRegister(0, MVT::i32));
2723 case ISD::UMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
    if (Subtarget->isThumb()) {
2727 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2728 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2729 CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops, 4);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2733 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2734 CurDAG->getRegister(0, MVT::i32) };
2735 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2736 ARM::UMULL : ARM::UMULLv5,
                                    dl, MVT::i32, MVT::i32, Ops, 5);
    }
  }
  case ISD::SMUL_LOHI: {
    if (Subtarget->isThumb1Only())
      break;
2743 if (Subtarget->isThumb()) {
2744 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2745 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
      return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops, 4);
    } else {
      SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2749 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
2750 CurDAG->getRegister(0, MVT::i32) };
2751 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2752 ARM::SMULL : ARM::SMULLv5,
                                    dl, MVT::i32, MVT::i32, Ops, 5);
    }
  }
  case ISD::LOAD: {
    SDNode *ResNode = 0;
    if (Subtarget->isThumb() && Subtarget->hasThumb2())
      ResNode = SelectT2IndexedLoad(N);
    else
      ResNode = SelectARMIndexedLoad(N);
    if (ResNode)
      return ResNode;
    // Other cases are autogenerated.
    break;
  }
  case ARMISD::BRCOND: {
2768 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2769 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2770 // Pattern complexity = 6 cost = 1 size = 0
2772 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2773 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2774 // Pattern complexity = 6 cost = 1 size = 0
2776 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2777 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2778 // Pattern complexity = 6 cost = 1 size = 0
2780 unsigned Opc = Subtarget->isThumb() ?
2781 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2782 SDValue Chain = N->getOperand(0);
2783 SDValue N1 = N->getOperand(1);
2784 SDValue N2 = N->getOperand(2);
2785 SDValue N3 = N->getOperand(3);
2786 SDValue InFlag = N->getOperand(4);
2787 assert(N1.getOpcode() == ISD::BasicBlock);
2788 assert(N2.getOpcode() == ISD::Constant);
2789 assert(N3.getOpcode() == ISD::Register);
    SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
                               cast<ConstantSDNode>(N2)->getZExtValue()),
                               MVT::i32);
    SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
    SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
                                             MVT::Glue, Ops, 5);
    Chain = SDValue(ResNode, 0);
2798 if (N->getNumValues() == 2) {
2799 InFlag = SDValue(ResNode, 1);
      ReplaceUses(SDValue(N, 1), InFlag);
    }
    ReplaceUses(SDValue(N, 0),
                SDValue(Chain.getNode(), Chain.getResNo()));
    return NULL;
  }
  case ARMISD::CMOV:
    return SelectCMOVOp(N);
  case ARMISD::CAND:
  case ARMISD::COR:
  case ARMISD::CXOR:
    return SelectConditionalOp(N);
  case ARMISD::VZIP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
2815 switch (VT.getSimpleVT().SimpleTy) {
2816 default: return NULL;
2817 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2818 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2820 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2821 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2822 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2823 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2825 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2827 SDValue Pred = getAL(CurDAG);
2828 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2829 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::VUZP: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
2835 switch (VT.getSimpleVT().SimpleTy) {
2836 default: return NULL;
2837 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2838 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2840 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2841 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2842 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2843 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2845 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2847 SDValue Pred = getAL(CurDAG);
2848 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2849 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::VTRN: {
    unsigned Opc = 0;
    EVT VT = N->getValueType(0);
2855 switch (VT.getSimpleVT().SimpleTy) {
2856 default: return NULL;
2857 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2858 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2860 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2861 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2862 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2864 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2866 SDValue Pred = getAL(CurDAG);
2867 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2868 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
    return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops, 4);
  }
  case ARMISD::BUILD_VECTOR: {
2872 EVT VecVT = N->getValueType(0);
2873 EVT EltVT = VecVT.getVectorElementType();
2874 unsigned NumElts = VecVT.getVectorNumElements();
2875 if (EltVT == MVT::f64) {
2876 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
      return PairDRegs(VecVT, N->getOperand(0), N->getOperand(1));
    }
    assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
    if (NumElts == 2)
      return PairSRegs(VecVT, N->getOperand(0), N->getOperand(1));
    assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2883 return QuadSRegs(VecVT, N->getOperand(0), N->getOperand(1),
                     N->getOperand(2), N->getOperand(3));
  }

  case ARMISD::VLD2DUP: {
    static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
                                        ARM::VLD2DUPd32 };
    return SelectVLDDup(N, false, 2, Opcodes);
  }
2893 case ARMISD::VLD3DUP: {
2894 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2895 ARM::VLD3DUPd16Pseudo,
2896 ARM::VLD3DUPd32Pseudo };
2897 return SelectVLDDup(N, false, 3, Opcodes);
2900 case ARMISD::VLD4DUP: {
2901 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2902 ARM::VLD4DUPd16Pseudo,
2903 ARM::VLD4DUPd32Pseudo };
2904 return SelectVLDDup(N, false, 4, Opcodes);
2907 case ARMISD::VLD2DUP_UPD: {
2908 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2909 ARM::VLD2DUPd16wb_fixed,
2910 ARM::VLD2DUPd32wb_fixed };
2911 return SelectVLDDup(N, true, 2, Opcodes);
2914 case ARMISD::VLD3DUP_UPD: {
2915 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2916 ARM::VLD3DUPd16Pseudo_UPD,
2917 ARM::VLD3DUPd32Pseudo_UPD };
2918 return SelectVLDDup(N, true, 3, Opcodes);
2921 case ARMISD::VLD4DUP_UPD: {
2922 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2923 ARM::VLD4DUPd16Pseudo_UPD,
2924 ARM::VLD4DUPd32Pseudo_UPD };
2925 return SelectVLDDup(N, true, 4, Opcodes);
2928 case ARMISD::VLD1_UPD: {
2929 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2930 ARM::VLD1d16wb_fixed,
2931 ARM::VLD1d32wb_fixed,
2932 ARM::VLD1d64wb_fixed };
2933 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2934 ARM::VLD1q16wb_fixed,
2935 ARM::VLD1q32wb_fixed,
2936 ARM::VLD1q64wb_fixed };
2937 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, 0);
2940 case ARMISD::VLD2_UPD: {
2941 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2942 ARM::VLD2d16wb_fixed,
2943 ARM::VLD2d32wb_fixed,
2944 ARM::VLD1q64wb_fixed};
2945 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2946 ARM::VLD2q16PseudoWB_fixed,
2947 ARM::VLD2q32PseudoWB_fixed };
2948 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, 0);
2951 case ARMISD::VLD3_UPD: {
2952 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2953 ARM::VLD3d16Pseudo_UPD,
2954 ARM::VLD3d32Pseudo_UPD,
2955 ARM::VLD1q64wb_fixed};
2956 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2957 ARM::VLD3q16Pseudo_UPD,
2958 ARM::VLD3q32Pseudo_UPD };
2959 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2960 ARM::VLD3q16oddPseudo_UPD,
2961 ARM::VLD3q32oddPseudo_UPD };
2962 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2965 case ARMISD::VLD4_UPD: {
2966 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
2967 ARM::VLD4d16Pseudo_UPD,
2968 ARM::VLD4d32Pseudo_UPD,
2969 ARM::VLD1q64wb_fixed};
2970 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2971 ARM::VLD4q16Pseudo_UPD,
2972 ARM::VLD4q32Pseudo_UPD };
2973 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2974 ARM::VLD4q16oddPseudo_UPD,
2975 ARM::VLD4q32oddPseudo_UPD };
2976 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2979 case ARMISD::VLD2LN_UPD: {
2980 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
2981 ARM::VLD2LNd16Pseudo_UPD,
2982 ARM::VLD2LNd32Pseudo_UPD };
2983 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2984 ARM::VLD2LNq32Pseudo_UPD };
2985 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2988 case ARMISD::VLD3LN_UPD: {
2989 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
2990 ARM::VLD3LNd16Pseudo_UPD,
2991 ARM::VLD3LNd32Pseudo_UPD };
2992 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
2993 ARM::VLD3LNq32Pseudo_UPD };
2994 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
2997 case ARMISD::VLD4LN_UPD: {
2998 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
2999 ARM::VLD4LNd16Pseudo_UPD,
3000 ARM::VLD4LNd32Pseudo_UPD };
3001 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
3002 ARM::VLD4LNq32Pseudo_UPD };
3003 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
3006 case ARMISD::VST1_UPD: {
3007 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
3008 ARM::VST1d16wb_fixed,
3009 ARM::VST1d32wb_fixed,
3010 ARM::VST1d64wb_fixed };
3011 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
3012 ARM::VST1q16wb_fixed,
3013 ARM::VST1q32wb_fixed,
3014 ARM::VST1q64wb_fixed };
3015 return SelectVST(N, true, 1, DOpcodes, QOpcodes, 0);
3018 case ARMISD::VST2_UPD: {
3019 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
3020 ARM::VST2d16wb_fixed,
3021 ARM::VST2d32wb_fixed,
3022 ARM::VST1q64wb_fixed};
3023 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
3024 ARM::VST2q16PseudoWB_fixed,
3025 ARM::VST2q32PseudoWB_fixed };
3026 return SelectVST(N, true, 2, DOpcodes, QOpcodes, 0);
3029 case ARMISD::VST3_UPD: {
3030 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
3031 ARM::VST3d16Pseudo_UPD,
3032 ARM::VST3d32Pseudo_UPD,
3033 ARM::VST1d64TPseudoWB_fixed};
3034 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3035 ARM::VST3q16Pseudo_UPD,
3036 ARM::VST3q32Pseudo_UPD };
3037 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
3038 ARM::VST3q16oddPseudo_UPD,
3039 ARM::VST3q32oddPseudo_UPD };
3040 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
3043 case ARMISD::VST4_UPD: {
3044 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
3045 ARM::VST4d16Pseudo_UPD,
3046 ARM::VST4d32Pseudo_UPD,
3047 ARM::VST1d64QPseudoWB_fixed};
3048 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3049 ARM::VST4q16Pseudo_UPD,
3050 ARM::VST4q32Pseudo_UPD };
3051 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
3052 ARM::VST4q16oddPseudo_UPD,
3053 ARM::VST4q32oddPseudo_UPD };
3054 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
3057 case ARMISD::VST2LN_UPD: {
3058 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
3059 ARM::VST2LNd16Pseudo_UPD,
3060 ARM::VST2LNd32Pseudo_UPD };
3061 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
3062 ARM::VST2LNq32Pseudo_UPD };
3063 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
3066 case ARMISD::VST3LN_UPD: {
3067 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
3068 ARM::VST3LNd16Pseudo_UPD,
3069 ARM::VST3LNd32Pseudo_UPD };
3070 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
3071 ARM::VST3LNq32Pseudo_UPD };
3072 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
3075 case ARMISD::VST4LN_UPD: {
3076 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
3077 ARM::VST4LNd16Pseudo_UPD,
3078 ARM::VST4LNd32Pseudo_UPD };
3079 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
3080 ARM::VST4LNq32Pseudo_UPD };
3081 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
3084 case ISD::INTRINSIC_VOID:
3085 case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default:
      break;
3091 case Intrinsic::arm_ldrexd: {
3092 SDValue MemAddr = N->getOperand(2);
3093 DebugLoc dl = N->getDebugLoc();
3094 SDValue Chain = N->getOperand(0);
3096 unsigned NewOpc = ARM::LDREXD;
3097 if (Subtarget->isThumb() && Subtarget->hasThumb2())
3098 NewOpc = ARM::t2LDREXD;
      // arm_ldrexd returns an i64 value in {i32, i32}
3101 std::vector<EVT> ResTys;
3102 ResTys.push_back(MVT::i32);
3103 ResTys.push_back(MVT::i32);
3104 ResTys.push_back(MVT::Other);
3106 // place arguments in the right order
3107 SmallVector<SDValue, 7> Ops;
3108 Ops.push_back(MemAddr);
3109 Ops.push_back(getAL(CurDAG));
3110 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3111 Ops.push_back(Chain);
      SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
                                          Ops.size());
      // Transfer memoperands.
3115 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3116 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3117 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
      // Until there's support for specifying explicit register constraints
3120 // like the use of even/odd register pair, hardcode ldrexd to always
3121 // use the pair [R0, R1] to hold the load result.
3122 Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R0,
3123 SDValue(Ld, 0), SDValue(0,0));
3124 Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R1,
3125 SDValue(Ld, 1), Chain.getValue(1));
3128 SDValue Glue = Chain.getValue(1);
3129 if (!SDValue(N, 0).use_empty()) {
3130 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3131 ARM::R0, MVT::i32, Glue);
3132 Glue = Result.getValue(2);
3133 ReplaceUses(SDValue(N, 0), Result);
3135 if (!SDValue(N, 1).use_empty()) {
3136 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3137 ARM::R1, MVT::i32, Glue);
3138 Glue = Result.getValue(2);
        ReplaceUses(SDValue(N, 1), Result);
      }
      ReplaceUses(SDValue(N, 2), SDValue(Ld, 2));
      return NULL;
    }
3146 case Intrinsic::arm_strexd: {
3147 DebugLoc dl = N->getDebugLoc();
3148 SDValue Chain = N->getOperand(0);
3149 SDValue Val0 = N->getOperand(2);
3150 SDValue Val1 = N->getOperand(3);
3151 SDValue MemAddr = N->getOperand(4);
      // Until there's support for specifying explicit register constraints
3154 // like the use of even/odd register pair, hardcode strexd to always
3155 // use the pair [R2, R3] to hold the i64 (i32, i32) value to be stored.
      Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ARM::R2, Val0,
                                   SDValue(0, 0));
3158 Chain = CurDAG->getCopyToReg(Chain, dl, ARM::R3, Val1, Chain.getValue(1));
3160 SDValue Glue = Chain.getValue(1);
3161 Val0 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3162 ARM::R2, MVT::i32, Glue);
3163 Glue = Val0.getValue(1);
3164 Val1 = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
3165 ARM::R3, MVT::i32, Glue);
      // Store-exclusive double returns an i32 value which is the return status
      // of the issued store.
3169 std::vector<EVT> ResTys;
3170 ResTys.push_back(MVT::i32);
3171 ResTys.push_back(MVT::Other);
3173 // place arguments in the right order
3174 SmallVector<SDValue, 7> Ops;
3175 Ops.push_back(Val0);
3176 Ops.push_back(Val1);
3177 Ops.push_back(MemAddr);
3178 Ops.push_back(getAL(CurDAG));
3179 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3180 Ops.push_back(Chain);
3182 unsigned NewOpc = ARM::STREXD;
3183 if (Subtarget->isThumb() && Subtarget->hasThumb2())
3184 NewOpc = ARM::t2STREXD;
      SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops.data(),
                                          Ops.size());
      // Transfer memoperands.
3189 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3190 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
      cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);

      return St;
    }
3196 case Intrinsic::arm_neon_vld1: {
3197 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3198 ARM::VLD1d32, ARM::VLD1d64 };
3199 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3200 ARM::VLD1q32, ARM::VLD1q64};
3201 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, 0);
3204 case Intrinsic::arm_neon_vld2: {
3205 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3206 ARM::VLD2d32, ARM::VLD1q64 };
3207 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3208 ARM::VLD2q32Pseudo };
3209 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, 0);
3212 case Intrinsic::arm_neon_vld3: {
3213 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
3216 ARM::VLD1d64TPseudo };
3217 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3218 ARM::VLD3q16Pseudo_UPD,
3219 ARM::VLD3q32Pseudo_UPD };
3220 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3221 ARM::VLD3q16oddPseudo,
3222 ARM::VLD3q32oddPseudo };
3223 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3226 case Intrinsic::arm_neon_vld4: {
3227 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
3230 ARM::VLD1d64QPseudo };
3231 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3232 ARM::VLD4q16Pseudo_UPD,
3233 ARM::VLD4q32Pseudo_UPD };
3234 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3235 ARM::VLD4q16oddPseudo,
3236 ARM::VLD4q32oddPseudo };
3237 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3240 case Intrinsic::arm_neon_vld2lane: {
3241 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3242 ARM::VLD2LNd16Pseudo,
3243 ARM::VLD2LNd32Pseudo };
3244 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3245 ARM::VLD2LNq32Pseudo };
3246 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3249 case Intrinsic::arm_neon_vld3lane: {
3250 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3251 ARM::VLD3LNd16Pseudo,
3252 ARM::VLD3LNd32Pseudo };
3253 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3254 ARM::VLD3LNq32Pseudo };
3255 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3258 case Intrinsic::arm_neon_vld4lane: {
3259 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3260 ARM::VLD4LNd16Pseudo,
3261 ARM::VLD4LNd32Pseudo };
3262 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3263 ARM::VLD4LNq32Pseudo };
3264 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3267 case Intrinsic::arm_neon_vst1: {
3268 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3269 ARM::VST1d32, ARM::VST1d64 };
3270 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3271 ARM::VST1q32, ARM::VST1q64 };
3272 return SelectVST(N, false, 1, DOpcodes, QOpcodes, 0);
3275 case Intrinsic::arm_neon_vst2: {
3276 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3277 ARM::VST2d32, ARM::VST1q64 };
    static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
3279 ARM::VST2q32Pseudo };
3280 return SelectVST(N, false, 2, DOpcodes, QOpcodes, 0);
3283 case Intrinsic::arm_neon_vst3: {
3284 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
3287 ARM::VST1d64TPseudo };
3288 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3289 ARM::VST3q16Pseudo_UPD,
3290 ARM::VST3q32Pseudo_UPD };
3291 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3292 ARM::VST3q16oddPseudo,
3293 ARM::VST3q32oddPseudo };
3294 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3297 case Intrinsic::arm_neon_vst4: {
3298 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
3301 ARM::VST1d64QPseudo };
3302 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3303 ARM::VST4q16Pseudo_UPD,
3304 ARM::VST4q32Pseudo_UPD };
3305 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3306 ARM::VST4q16oddPseudo,
3307 ARM::VST4q32oddPseudo };
3308 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3311 case Intrinsic::arm_neon_vst2lane: {
3312 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3313 ARM::VST2LNd16Pseudo,
3314 ARM::VST2LNd32Pseudo };
3315 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3316 ARM::VST2LNq32Pseudo };
3317 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3320 case Intrinsic::arm_neon_vst3lane: {
3321 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3322 ARM::VST3LNd16Pseudo,
3323 ARM::VST3LNd32Pseudo };
3324 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3325 ARM::VST3LNq32Pseudo };
3326 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3329 case Intrinsic::arm_neon_vst4lane: {
3330 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3331 ARM::VST4LNd16Pseudo,
3332 ARM::VST4LNd32Pseudo };
3333 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3334 ARM::VST4LNq32Pseudo };
      return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
    }
    }
    break;
  }
3341 case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
3347 case Intrinsic::arm_neon_vtbl2:
3348 return SelectVTBL(N, false, 2, ARM::VTBL2);
3349 case Intrinsic::arm_neon_vtbl3:
3350 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3351 case Intrinsic::arm_neon_vtbl4:
3352 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3354 case Intrinsic::arm_neon_vtbx2:
3355 return SelectVTBL(N, true, 2, ARM::VTBX2);
3356 case Intrinsic::arm_neon_vtbx3:
3357 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3358 case Intrinsic::arm_neon_vtbx4:
    case Intrinsic::arm_neon_vtbx4:
      return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
    }
    break;
  }
3364 case ARMISD::VTBL1: {
3365 DebugLoc dl = N->getDebugLoc();
3366 EVT VT = N->getValueType(0);
3367 SmallVector<SDValue, 6> Ops;
3369 Ops.push_back(N->getOperand(0));
3370 Ops.push_back(N->getOperand(1));
3371 Ops.push_back(getAL(CurDAG)); // Predicate
3372 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3373 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops.data(), Ops.size());
3375 case ARMISD::VTBL2: {
3376 DebugLoc dl = N->getDebugLoc();
3377 EVT VT = N->getValueType(0);
3379 // Form a REG_SEQUENCE to force register allocation.
3380 SDValue V0 = N->getOperand(0);
3381 SDValue V1 = N->getOperand(1);
3382 SDValue RegSeq = SDValue(PairDRegs(MVT::v16i8, V0, V1), 0);
3384 SmallVector<SDValue, 6> Ops;
3385 Ops.push_back(RegSeq);
3386 Ops.push_back(N->getOperand(2));
3387 Ops.push_back(getAL(CurDAG)); // Predicate
3388 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3389 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT,
                                  Ops.data(), Ops.size());
  }
3393 case ISD::CONCAT_VECTORS:
3394 return SelectConcatVector(N);
3396 case ARMISD::ATOMOR64_DAG:
3397 return SelectAtomic64(N, ARM::ATOMOR6432);
3398 case ARMISD::ATOMXOR64_DAG:
3399 return SelectAtomic64(N, ARM::ATOMXOR6432);
3400 case ARMISD::ATOMADD64_DAG:
3401 return SelectAtomic64(N, ARM::ATOMADD6432);
3402 case ARMISD::ATOMSUB64_DAG:
3403 return SelectAtomic64(N, ARM::ATOMSUB6432);
3404 case ARMISD::ATOMNAND64_DAG:
3405 return SelectAtomic64(N, ARM::ATOMNAND6432);
3406 case ARMISD::ATOMAND64_DAG:
3407 return SelectAtomic64(N, ARM::ATOMAND6432);
3408 case ARMISD::ATOMSWAP64_DAG:
3409 return SelectAtomic64(N, ARM::ATOMSWAP6432);
3410 case ARMISD::ATOMCMPXCHG64_DAG:
    return SelectAtomic64(N, ARM::ATOMCMPXCHG6432);
  }

  return SelectCode(N);
}
3417 bool ARMDAGToDAGISel::
3418 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
3419 std::vector<SDValue> &OutOps) {
3420 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
3421 // Require the address to be in a register. That is safe for all ARM
3422 // variants and it is hard to do anything much smarter without knowing
3423 // how the operand is used.
  OutOps.push_back(Op);
  return false;
}
3428 /// createARMISelDag - This pass converts a legalized DAG into a
3429 /// ARM-specific DAG, ready for instruction scheduling.
3431 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
3432 CodeGenOpt::Level OptLevel) {
  return new ARMDAGToDAGISel(TM, OptLevel);
}