//===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the ARM target.
//
//===----------------------------------------------------------------------===//
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMTargetMachine.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define DEBUG_TYPE "arm-isel"

static cl::opt<bool>
DisableShifterOp("disable-shifter-op", cl::Hidden,
  cl::desc("Disable isel of shifter-op"),
  cl::init(false));

static cl::opt<bool>
CheckVMLxHazard("check-vmlx-hazard", cl::Hidden,
  cl::desc("Check fp vmla / vmls hazard at isel time"),
  cl::init(true));

//===--------------------------------------------------------------------===//
/// ARMDAGToDAGISel - ARM specific code to select ARM machine
/// instructions for SelectionDAG operations.
///
namespace {

enum AddrMode2Type {
  AM2_BASE, // Simple AM2 (+-imm12)
  AM2_SHOP  // Shifter-op AM2
};

class ARMDAGToDAGISel : public SelectionDAGISel {
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

public:
  explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel) {}
  bool runOnMachineFunction(MachineFunction &MF) override {
    // Reset the subtarget each time through.
    Subtarget = &MF.getSubtarget<ARMSubtarget>();
    SelectionDAGISel::runOnMachineFunction(MF);
    return true;
  }

  const char *getPassName() const override {
    return "ARM Instruction Selection";
  }

  void PreprocessISelDAG() override;

  /// getI32Imm - Return a target constant of type i32 with the specified
  /// value.
  inline SDValue getI32Imm(unsigned Imm, SDLoc dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  }

  SDNode *Select(SDNode *N) override;
  bool hasNoVMLxHazardUse(SDNode *N) const;
  bool isShifterOpProfitable(const SDValue &Shift,
                             ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt);
  bool SelectRegShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, SDValue &C,
                               bool CheckProfitability = true);
  bool SelectImmShifterOperand(SDValue N, SDValue &A,
                               SDValue &B, bool CheckProfitability = true);
  bool SelectShiftRegShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B, SDValue &C) {
    // Don't apply the profitability check.
    return SelectRegShifterOperand(N, A, B, C, false);
  }
  bool SelectShiftImmShifterOperand(SDValue N, SDValue &A,
                                    SDValue &B) {
    // Don't apply the profitability check.
    return SelectImmShifterOperand(N, A, B, false);
  }
  bool SelectAddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc);

  AddrMode2Type SelectAddrMode2Worker(SDValue N, SDValue &Base,
                                      SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2Base(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_BASE;
  }

  bool SelectAddrMode2ShOp(SDValue N, SDValue &Base, SDValue &Offset,
                           SDValue &Opc) {
    return SelectAddrMode2Worker(N, Base, Offset, Opc) == AM2_SHOP;
  }

  bool SelectAddrMode2(SDValue N, SDValue &Base, SDValue &Offset,
                       SDValue &Opc) {
    SelectAddrMode2Worker(N, Base, Offset, Opc);
//  return SelectAddrMode2ShOp(N, Base, Offset, Opc);
    // This always matches one way or another.
    return true;
  }

  bool SelectCMOVPred(SDValue N, SDValue &Pred, SDValue &Reg) {
    const ConstantSDNode *CN = cast<ConstantSDNode>(N);
    Pred = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(N), MVT::i32);
    Reg = CurDAG->getRegister(ARM::CPSR, MVT::i32);
    return true;
  }
  bool SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                   SDValue &Offset, SDValue &Opc);
  bool SelectAddrOffsetNone(SDValue N, SDValue &Base);
  bool SelectAddrMode3(SDValue N, SDValue &Base,
                       SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode3Offset(SDNode *Op, SDValue N,
                             SDValue &Offset, SDValue &Opc);
  bool SelectAddrMode5(SDValue N, SDValue &Base,
                       SDValue &Offset);
  bool SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                       SDValue &Align);
  bool SelectAddrMode6Offset(SDNode *Op, SDValue N, SDValue &Offset);

  bool SelectAddrModePC(SDValue N, SDValue &Offset, SDValue &Label);

  // Thumb Addressing Modes:
  bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset);
  bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base,
                                SDValue &OffImm);
  bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                 SDValue &OffImm);
  bool SelectThumbAddrModeSP(SDValue N, SDValue &Base, SDValue &OffImm);
  // Thumb 2 Addressing Modes:
  bool SelectT2ShifterOperandReg(SDValue N,
                                 SDValue &BaseReg, SDValue &Opc);
  bool SelectT2AddrModeImm12(SDValue N, SDValue &Base, SDValue &OffImm);
  bool SelectT2AddrModeImm8(SDValue N, SDValue &Base,
                            SDValue &OffImm);
  bool SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                  SDValue &OffImm);
  bool SelectT2AddrModeSoReg(SDValue N, SDValue &Base,
                             SDValue &OffReg, SDValue &ShImm);
  bool SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm);

  inline bool is_so_imm(unsigned Imm) const {
    return ARM_AM::getSOImmVal(Imm) != -1;
  }

  inline bool is_so_imm_not(unsigned Imm) const {
    return ARM_AM::getSOImmVal(~Imm) != -1;
  }

  inline bool is_t2_so_imm(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(Imm) != -1;
  }

  inline bool is_t2_so_imm_not(unsigned Imm) const {
    return ARM_AM::getT2SOImmVal(~Imm) != -1;
  }

  // Include the pieces autogenerated from the target description.
#include "ARMGenDAGISel.inc"

private:
  /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
  /// ARM.
  SDNode *SelectARMIndexedLoad(SDNode *N);
  SDNode *SelectT2IndexedLoad(SDNode *N);

  /// SelectVLD - Select NEON load intrinsics. NumVecs should be
  /// 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// loads of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVST - Select NEON store intrinsics. NumVecs should
  /// be 1, 2, 3 or 4. The opcode arrays specify the instructions used for
  /// stores of D registers and even subregs and odd subregs of Q registers.
  /// For NumVecs <= 2, QOpcodes1 is not used.
  SDNode *SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
                    const uint16_t *DOpcodes,
                    const uint16_t *QOpcodes0, const uint16_t *QOpcodes1);

  /// SelectVLDSTLane - Select NEON load/store lane intrinsics. NumVecs should
  /// be 2, 3 or 4. The opcode arrays specify the instructions used for
  /// load/store of D registers and Q registers.
  SDNode *SelectVLDSTLane(SDNode *N, bool IsLoad,
                          bool isUpdating, unsigned NumVecs,
                          const uint16_t *DOpcodes, const uint16_t *QOpcodes);

  /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs
  /// should be 2, 3 or 4. The opcode array specifies the instructions used
  /// for loading D registers. (Q registers are not supported.)
  SDNode *SelectVLDDup(SDNode *N, bool isUpdating, unsigned NumVecs,
                       const uint16_t *Opcodes);

  /// SelectVTBL - Select NEON VTBL and VTBX intrinsics. NumVecs should be 2,
  /// 3 or 4. These are custom-selected so that a REG_SEQUENCE can be
  /// generated to force the table registers to be consecutive.
  SDNode *SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs, unsigned Opc);

  /// SelectV6T2BitfieldExtractOp - Select SBFX/UBFX instructions for ARM.
  SDNode *SelectV6T2BitfieldExtractOp(SDNode *N, bool isSigned);

  // Select special operations if the node forms an integer ABS pattern.
  SDNode *SelectABSOp(SDNode *N);

  SDNode *SelectReadRegister(SDNode *N);
  SDNode *SelectWriteRegister(SDNode *N);

  SDNode *SelectInlineAsm(SDNode *N);

  SDNode *SelectConcatVector(SDNode *N);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  // Form pairs of consecutive R, S, D, or Q registers.
  SDNode *createGPRPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createSRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createDRegPairNode(EVT VT, SDValue V0, SDValue V1);
  SDNode *createQRegPairNode(EVT VT, SDValue V0, SDValue V1);

  // Form sequences of 4 consecutive S, D, or Q registers.
  SDNode *createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);
  SDNode *createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                              SDValue V2, SDValue V3);

  // Get the alignment operand for a NEON VLD or VST instruction.
  SDValue GetVLDSTAlign(SDValue Align, SDLoc dl, unsigned NumVecs,
                        bool is64BitVector);
};
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit
// constant operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc &&
         isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

/// \brief Check whether a particular node is a constant value representable as
/// (N * Scale) where N is in the range [\p RangeMin, \p RangeMax).
///
/// \param ScaledConstant [out] - On success, the pre-scaled constant value.
static bool isScaledConstantInRange(SDValue Node, int Scale,
                                    int RangeMin, int RangeMax,
                                    int &ScaledConstant) {
  assert(Scale > 0 && "Invalid scale!");

  // Check that this is a constant.
  const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Node);
  if (!C)
    return false;

  ScaledConstant = (int) C->getZExtValue();
  if ((ScaledConstant % Scale) != 0)
    return false;

  ScaledConstant /= Scale;
  return ScaledConstant >= RangeMin && ScaledConstant < RangeMax;
}
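
// A minimal usage sketch (values invented for illustration): a constant node
// holding 48, checked with Scale = 4 against the range [0, 32), succeeds and
// yields ScaledConstant == 12, while a constant 50 fails because 50 % 4 != 0:
//
//   int Scaled;
//   if (isScaledConstantInRange(SomeConstant, /*Scale=*/4, 0, 32, Scaled))
//     ...; // Scaled == 12 here.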

void ARMDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->hasV6T2Ops())
    return;

  bool isThumb2 = Subtarget->isThumb();
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (N->getOpcode() != ISD::ADD)
      continue;

    // Look for (add X1, (and (srl X2, c1), c2)) where c2 is a constant with
    // leading zeros, followed by consecutive set bits, followed by 1 or 2
    // trailing zeros, e.g. 1020.
    // Transform the expression to
    // (add X1, (shl (and (srl X2, c1), (c2>>tz)), tz)) where tz is the number
    // of trailing zeros of c2. The left shift would be folded as a shifter
    // operand of 'add' and the 'and' and 'srl' would become a bit-extraction
    // node (UBFX).
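    //
    // A worked example (input chosen for illustration, not from a testcase):
    // with c1 = 14 and c2 = 1020 (0x3FC, so tz = 2 and c2 >> tz = 0xFF),
    //   (add X1, (and (srl X2, 14), 0x3FC))
    // is rewritten to
    //   (add X1, (shl (and (srl X2, 16), 0xFF), 2))
    // The shl folds into the add as an "lsl #2" shifter operand, and the
    // remaining and+srl pair is selected as "ubfx #16, #8".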

    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    unsigned And_imm = 0;
    if (!isOpcWithIntImmediate(N1.getNode(), ISD::AND, And_imm)) {
      if (isOpcWithIntImmediate(N0.getNode(), ISD::AND, And_imm))
        std::swap(N0, N1);
    }
    if (!And_imm)
      continue;

    // Check if the AND mask is an immediate of the form: 000.....1111111100
    unsigned TZ = countTrailingZeros(And_imm);
    if (TZ != 1 && TZ != 2)
      // Be conservative here. Shifter operands aren't always free. e.g. on
      // Swift, a left-shift amount of 1 or 2 is free but others are not.
      // e.g.
      //  ubfx   r3, r1, #16, #8
      //  ldr.w  r3, [r0, r3, lsl #2]
      // vs.
      //  mov.w  r9, #1020
      //  and.w  r2, r9, r1, lsr #14
      //  ldr    r2, [r0, r2]
      continue;
    And_imm >>= TZ;
    if (And_imm & (And_imm + 1))
      continue;

    // Look for (and (srl X, c1), c2).
    SDValue Srl = N1.getOperand(0);
    unsigned Srl_imm = 0;
    if (!isOpcWithIntImmediate(Srl.getNode(), ISD::SRL, Srl_imm) ||
        (Srl_imm <= 2))
      continue;

    // Make sure the first operand is not a shifter operand which would prevent
    // folding of the left shift.
    SDValue CPTmp0;
    SDValue CPTmp1;
    SDValue CPTmp2;
    if (isThumb2) {
      if (SelectT2ShifterOperandReg(N0, CPTmp0, CPTmp1))
        continue;
    } else {
      if (SelectImmShifterOperand(N0, CPTmp0, CPTmp1) ||
          SelectRegShifterOperand(N0, CPTmp0, CPTmp1, CPTmp2))
        continue;
    }

    // Now make the transformation.
    Srl = CurDAG->getNode(ISD::SRL, SDLoc(Srl), MVT::i32,
                          Srl.getOperand(0),
                          CurDAG->getConstant(Srl_imm + TZ, SDLoc(Srl),
                                              MVT::i32));
    N1 = CurDAG->getNode(ISD::AND, SDLoc(N1), MVT::i32,
                         Srl,
                         CurDAG->getConstant(And_imm, SDLoc(Srl), MVT::i32));
    N1 = CurDAG->getNode(ISD::SHL, SDLoc(N1), MVT::i32,
                         N1, CurDAG->getConstant(TZ, SDLoc(Srl), MVT::i32));
    CurDAG->UpdateNodeOperands(N, N0, N1);
  }
}

/// hasNoVMLxHazardUse - Return true if it's desirable to select a FP MLA / MLS
/// node. VFP / NEON fp VMLA / VMLS instructions have special RAW hazards (at
/// least on current ARM implementations) which should be avoided.
bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const {
  if (OptLevel == CodeGenOpt::None)
    return true;

  if (!CheckVMLxHazard)
    return true;

  if (!Subtarget->isCortexA7() && !Subtarget->isCortexA8() &&
      !Subtarget->isCortexA9() && !Subtarget->isSwift())
    return true;

  if (!N->hasOneUse())
    return false;

  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() == ISD::CopyToReg)
    return true;
  if (Use->isMachineOpcode()) {
    const ARMBaseInstrInfo *TII = static_cast<const ARMBaseInstrInfo *>(
        CurDAG->getSubtarget().getInstrInfo());

    const MCInstrDesc &MCID = TII->get(Use->getMachineOpcode());
    if (MCID.mayStore())
      return true;
    unsigned Opcode = MCID.getOpcode();
    if (Opcode == ARM::VMOVRS || Opcode == ARM::VMOVRRD)
      return true;
    // vmlx feeding into another vmlx. We actually want to unfold
    // the use later in the MLxExpansion pass. e.g.
    // vmla
    // vmla (stall 8 cycles)
    //
    // vmul (5 cycles)
    // vadd (5 cycles)
    // vmla (stall 4 cycles)
    // This adds up to about 18 - 19 cycles.
    //
    // vmla
    // vmul (stall 4 cycles)
    // vadd adds up to about 14 cycles.
    return TII->isFpMLxInstruction(Opcode);
  }

  return false;
}

bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift,
                                            ARM_AM::ShiftOpc ShOpcVal,
                                            unsigned ShAmt) {
  if (!Subtarget->isLikeA9() && !Subtarget->isSwift())
    return true;
  if (Shift.hasOneUse())
    return true;
  // R << 2 is free.
  return ShOpcVal == ARM_AM::lsl &&
         (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1));
}

bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match the base-register-only case. That is matched to a separate
  // lower-complexity pattern with an explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS) return false;
  ShImmVal = RHS->getZExtValue() & 31;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  SDLoc(N), MVT::i32);
  return true;
}
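
// A small sketch of what this matches (register names invented for
// illustration): for N = (shl r1, 3) it leaves BaseReg = r1 and encodes Opc
// as getSORegOpc(lsl, 3), so the consuming instruction folds the operand and
// prints it as "r1, lsl #3" instead of using a separate shift instruction.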

bool ARMDAGToDAGISel::SelectRegShifterOperand(SDValue N,
                                              SDValue &BaseReg,
                                              SDValue &ShReg,
                                              SDValue &Opc,
                                              bool CheckProfitability) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match the base-register-only case. That is matched to a separate
  // lower-complexity pattern with an explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (RHS) return false;  // A constant shift amount is the immediate form.

  ShReg = N.getOperand(1);
  if (CheckProfitability && !isShifterOpProfitable(N, ShOpcVal, ShImmVal))
    return false;
  Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
                                  SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N,
                                          SDValue &Base,
                                          SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC > -0x1000 && RHSC < 0x1000) { // 12 bits
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}
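
// For illustration (an invented DAG, not a testcase): N = (add r1, -20)
// produces Base = r1 and OffImm = -20, which the LDRi12/STRi12 patterns
// print as "[r1, #-20]"; offsets of +/-4096 and beyond fall through to the
// base-only form at the bottom and the add is selected separately.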

bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          SDLoc(N), MVT::i32);
          return true;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ISD::ADD.
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave simple R +/- imm12 operands for LDRi12.
  if (N.getOpcode() == ISD::ADD || N.getOpcode() == ISD::OR) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) // 12 bits.
      return false;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::SUB ? ARM_AM::sub:ARM_AM::add;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
           dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
             dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return true;
}
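
// For illustration (registers invented): N = (add r1, (shl r2, 2)) matches
// with Base = r1, Offset = r2 and an lsl #2 shifter encoding in Opc, so a
// load using this operand selects to something like "ldr r0, [r1, r2, lsl #2]".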

AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     SDValue &Opc) {
  if (N.getOpcode() == ISD::MUL &&
      (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) {
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      // X * [3,5,9] -> X + X * [2,4,8] etc.
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC & 1) {
        RHSC = RHSC & ~1;
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (RHSC < 0) {
          AddSub = ARM_AM::sub;
          RHSC = - RHSC;
        }
        if (isPowerOf2_32(RHSC)) {
          unsigned ShAmt = Log2_32(RHSC);
          Base = Offset = N.getOperand(0);
          Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
                                                            ARM_AM::lsl),
                                          SDLoc(N), MVT::i32);
          return AM2_SHOP;
        }
      }
    }
  }

  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      // ISD::OR that is equivalent to an ADD.
      !CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    SDLoc(N), MVT::i32);
    return AM2_BASE;
  }

  // Match simple R +/- imm12 operands.
  if (N.getOpcode() != ISD::SUB) {
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                                -0x1000+1, 0x1000, RHSC)) { // 12 bits.
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      Offset = CurDAG->getRegister(0, MVT::i32);

      ARM_AM::AddrOpc AddSub = ARM_AM::add;
      if (RHSC < 0) {
        AddSub = ARM_AM::sub;
        RHSC = - RHSC;
      }
      Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
                                                        ARM_AM::no_shift),
                                      SDLoc(N), MVT::i32);
      return AM2_BASE;
    }
  }

  if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) {
    // Compute R +/- (R << N) and reuse it.
    Base = N;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
                                                      ARM_AM::no_shift),
                                    SDLoc(N), MVT::i32);
    return AM2_BASE;
  }

  // Otherwise this is R +/- [possibly shifted] R.
  ARM_AM::AddrOpc AddSub = N.getOpcode() != ISD::SUB ? ARM_AM::add:ARM_AM::sub;
  ARM_AM::ShiftOpc ShOpcVal =
    ARM_AM::getShiftOpcForNode(N.getOperand(1).getOpcode());
  unsigned ShAmt = 0;

  Base   = N.getOperand(0);
  Offset = N.getOperand(1);

  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh =
        dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(Offset, ShOpcVal, ShAmt))
        Offset = N.getOperand(1).getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  // Try matching (R shl C) + (R).
  if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift &&
      !(Subtarget->isLikeA9() || Subtarget->isSwift() ||
        N.getOperand(0).hasOneUse())) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode());
    if (ShOpcVal != ARM_AM::no_shift) {
      // Check to see if the RHS of the shift is a constant, if not, we can't
      // fold it.
      if (ConstantSDNode *Sh =
          dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
        ShAmt = Sh->getZExtValue();
        if (isShifterOpProfitable(N.getOperand(0), ShOpcVal, ShAmt)) {
          Offset = N.getOperand(0).getOperand(0);
          Base = N.getOperand(1);
        } else {
          ShAmt = 0;
          ShOpcVal = ARM_AM::no_shift;
        }
      } else {
        ShOpcVal = ARM_AM::no_shift;
      }
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return AM2_SHOP;
}
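
// For illustration: N = (add r1, 4) matches the simple form and yields
// AM2_BASE, while N = (add r1, (shl r2, 2)) yields AM2_SHOP; the
// SelectAddrMode2Base/SelectAddrMode2ShOp wrappers in the class above use
// this return value to pick between the immediate and shifter-op patterns.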

bool ARMDAGToDAGISel::SelectAddrMode2OffsetReg(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val))
    return false;

  Offset = N;
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());
  unsigned ShAmt = 0;
  if (ShOpcVal != ARM_AM::no_shift) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (isShifterOpProfitable(N, ShOpcVal, ShAmt))
        Offset = N.getOperand(0);
      else {
        ShAmt = 0;
        ShOpcVal = ARM_AM::no_shift;
      }
    } else {
      ShOpcVal = ARM_AM::no_shift;
    }
  }

  Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
                                  SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImmPre(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    if (AddSub == ARM_AM::sub) Val *= -1;
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(Val, SDLoc(Op), MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrMode2OffsetImm(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x1000, Val)) { // 12 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
                                                      ARM_AM::no_shift),
                                    SDLoc(Op), MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectAddrOffsetNone(SDValue N, SDValue &Base) {
  Base = N;
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode3(SDValue N,
                                      SDValue &Base, SDValue &Offset,
                                      SDValue &Opc) {
  if (N.getOpcode() == ISD::SUB) {
    // X - C is canonicalized to X + -C, no need to handle it here.
    Base = N.getOperand(0);
    Offset = N.getOperand(1);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/1,
                              -256 + 1, 256, RHSC)) { // 8 bits.
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }
    Offset = CurDAG->getRegister(0, MVT::i32);

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC), SDLoc(N),
                                    MVT::i32);
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), SDLoc(N),
                                  MVT::i32);
  return true;
}
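
// For illustration: addrmode3 (LDRH/LDRSH/LDRD and friends) only has an
// 8-bit immediate offset, so N = (add r1, 200) folds to "[r1, #200]", while
// an out-of-range offset such as 300 falls through to the register-offset
// form above (the constant operand gets materialized into a register).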

bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset, SDValue &Opc) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
    ? ARM_AM::add : ARM_AM::sub;
  int Val;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 256, Val)) { // 8 bits.
    Offset = CurDAG->getRegister(0, MVT::i32);
    Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), SDLoc(Op),
                                    MVT::i32);
    return true;
  }

  Offset = N;
  Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), SDLoc(Op),
                                  MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode5(SDValue N,
                                      SDValue &Base, SDValue &Offset) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    Base = N;
    if (N.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                       SDLoc(N), MVT::i32);
    return true;
  }

  // If the RHS is +/- imm8, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4,
                              -256 + 1, 256, RHSC)) {
    Base = N.getOperand(0);
    if (Base.getOpcode() == ISD::FrameIndex) {
      int FI = cast<FrameIndexSDNode>(Base)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    }

    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (RHSC < 0) {
      AddSub = ARM_AM::sub;
      RHSC = -RHSC;
    }
    Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
                                       SDLoc(N), MVT::i32);
    return true;
  }

  Base = N;
  Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
                                     SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode6(SDNode *Parent, SDValue N, SDValue &Addr,
                                      SDValue &Align) {
  Addr = N;

  unsigned Alignment = 0;

  MemSDNode *MemN = cast<MemSDNode>(Parent);

  if (isa<LSBaseSDNode>(MemN) ||
      ((MemN->getOpcode() == ARMISD::VST1_UPD ||
        MemN->getOpcode() == ARMISD::VLD1_UPD) &&
       MemN->getConstantOperandVal(MemN->getNumOperands() - 1) == 1)) {
    // This case occurs only for VLD1-lane/dup and VST1-lane instructions.
    // The maximum alignment is equal to the memory size being referenced.
    unsigned MMOAlign = MemN->getAlignment();
    unsigned MemSize = MemN->getMemoryVT().getSizeInBits() / 8;
    if (MMOAlign >= MemSize && MemSize > 1)
      Alignment = MemSize;
  } else {
    // All other uses of addrmode6 are for intrinsics. For now just record
    // the raw alignment value; it will be refined later based on the legal
    // alignment operands for the intrinsic.
    Alignment = MemN->getAlignment();
  }

  Align = CurDAG->getTargetConstant(Alignment, SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectAddrMode6Offset(SDNode *Op, SDValue N,
                                            SDValue &Offset) {
  LSBaseSDNode *LdSt = cast<LSBaseSDNode>(Op);
  ISD::MemIndexedMode AM = LdSt->getAddressingMode();
  if (AM != ISD::POST_INC)
    return false;
  Offset = N;
  if (ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N)) {
    if (NC->getZExtValue() * 8 == LdSt->getMemoryVT().getSizeInBits())
      Offset = CurDAG->getRegister(0, MVT::i32);
  }
  return true;
}

bool ARMDAGToDAGISel::SelectAddrModePC(SDValue N,
                                       SDValue &Offset, SDValue &Label) {
  if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
    Offset = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
                                      SDLoc(N), MVT::i32);
    return true;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                         Thumb Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N,
                                            SDValue &Base, SDValue &Offset) {
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) {
    ConstantSDNode *NC = dyn_cast<ConstantSDNode>(N);
    if (!NC || !NC->isNullValue())
      return false;

    Base = Offset = N;
    return true;
  }

  Base = N.getOperand(0);
  Offset = N.getOperand(1);
  return true;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale,
                                          SDValue &Base, SDValue &OffImm) {
  if (!CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::ADD) {
      return false; // We want to select register offset instead.
    } else if (N.getOpcode() == ARMISD::Wrapper &&
               N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
    } else {
      Base = N;
    }

    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  // If the RHS is + imm5 * scale, fold into addr mode.
  int RHSC;
  if (isScaledConstantInRange(N.getOperand(1), Scale, 0, 32, RHSC)) {
    Base = N.getOperand(0);
    OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
    return true;
  }

  // Offset is too large, so use register offset instead.
  return false;
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S4(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 4, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S2(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 2, Base, OffImm);
}

bool
ARMDAGToDAGISel::SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base,
                                           SDValue &OffImm) {
  return SelectThumbAddrModeImm5S(N, 1, Base, OffImm);
}

bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  if (N.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(N)->getIndex();
    // Only multiples of 4 are allowed for the offset, so the frame object
    // alignment must be at least 4.
    MachineFrameInfo *MFI = MF->getFrameInfo();
    if (MFI->getObjectAlignment(FI) < 4)
      MFI->setObjectAlignment(FI, 4);
    Base = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (!CurDAG->isBaseWithConstantOffset(N))
    return false;

  RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
  if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
      (LHSR && LHSR->getReg() == ARM::SP)) {
    // If the RHS is + imm8 * scale, fold into addr mode.
    int RHSC;
    if (isScaledConstantInRange(N.getOperand(1), /*Scale=*/4, 0, 256, RHSC)) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        // For LHS+RHS to result in an offset that's a multiple of 4 the object
        // indexed by the LHS must be 4-byte aligned.
        MachineFrameInfo *MFI = MF->getFrameInfo();
        if (MFI->getObjectAlignment(FI) < 4)
          MFI->setObjectAlignment(FI, 4);
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  return false;
}
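
// For illustration: a Thumb1 SP-relative access such as N = (add sp, 16)
// folds to Base = sp and OffImm = 4 (the immediate is stored pre-scaled by
// 4, i.e. in words), matching an encoding like "ldr r0, [sp, #16]".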

//===----------------------------------------------------------------------===//
//                        Thumb 2 Addressing Modes
//===----------------------------------------------------------------------===//

bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue N, SDValue &BaseReg,
                                                SDValue &Opc) {
  if (DisableShifterOp)
    return false;

  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOpcode());

  // Don't match the base-register-only case. That is matched to a separate
  // lower-complexity pattern with an explicit register operand.
  if (ShOpcVal == ARM_AM::no_shift) return false;

  BaseReg = N.getOperand(0);
  unsigned ShImmVal = 0;
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    ShImmVal = RHS->getZExtValue() & 31;
    Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal), SDLoc(N));
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue N,
                                            SDValue &Base, SDValue &OffImm) {
  // Match simple R + imm12 operands.

  // Base only.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N)) {
    if (N.getOpcode() == ISD::FrameIndex) {
      // Match frame index.
      int FI = cast<FrameIndexSDNode>(N)->getIndex();
      Base = CurDAG->getTargetFrameIndex(
          FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
      return true;
    }

    if (N.getOpcode() == ARMISD::Wrapper &&
        N.getOperand(0).getOpcode() != ISD::TargetGlobalAddress) {
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::TargetConstantPool)
        return false; // We want to select t2LDRpci instead.
    } else
      Base = N;
    OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
    return true;
  }

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    if (SelectT2AddrModeImm8(N, Base, OffImm))
      // Let t2LDRi8 handle (R - imm8).
      return false;

    int RHSC = (int)RHS->getZExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits (unsigned)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  // Base only.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);
  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue N,
                                           SDValue &Base, SDValue &OffImm) {
  // Match simple R - imm8 operands.
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB &&
      !CurDAG->isBaseWithConstantOffset(N))
    return false;

  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getSExtValue();
    if (N.getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((RHSC >= -255) && (RHSC < 0)) { // 8 bits (always negative)
      Base = N.getOperand(0);
      if (Base.getOpcode() == ISD::FrameIndex) {
        int FI = cast<FrameIndexSDNode>(Base)->getIndex();
        Base = CurDAG->getTargetFrameIndex(
            FI, TLI->getPointerTy(CurDAG->getDataLayout()));
      }
      OffImm = CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32);
      return true;
    }
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDNode *Op, SDValue N,
                                                 SDValue &OffImm) {
  unsigned Opcode = Op->getOpcode();
  ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
    ? cast<LoadSDNode>(Op)->getAddressingMode()
    : cast<StoreSDNode>(Op)->getAddressingMode();
  int RHSC;
  if (isScaledConstantInRange(N, /*Scale=*/1, 0, 0x100, RHSC)) { // 8 bits.
    OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
      ? CurDAG->getTargetConstant(RHSC, SDLoc(N), MVT::i32)
      : CurDAG->getTargetConstant(-RHSC, SDLoc(N), MVT::i32);
    return true;
  }

  return false;
}

bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
                                            SDValue &Base,
                                            SDValue &OffReg, SDValue &ShImm) {
  // (R - imm8) should be handled by t2LDRi8. The rest are handled by t2LDRi12.
  if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N))
    return false;

  // Leave (R + imm12) for t2LDRi12, (R - imm8) for t2LDRi8.
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC >= 0 && RHSC < 0x1000) // 12 bits (unsigned)
      return false;
    else if (RHSC < 0 && RHSC >= -255) // 8 bits
      return false;
  }

  // Look for (R + R) or (R + (R << [1,2,3])).
  unsigned ShAmt = 0;
  Base   = N.getOperand(0);
  OffReg = N.getOperand(1);

  // Swap if it is ((R << c) + R).
  ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg.getOpcode());
  if (ShOpcVal != ARM_AM::lsl) {
    ShOpcVal = ARM_AM::getShiftOpcForNode(Base.getOpcode());
    if (ShOpcVal == ARM_AM::lsl)
      std::swap(Base, OffReg);
  }

  if (ShOpcVal == ARM_AM::lsl) {
    // Check to see if the RHS of the shift is a constant, if not, we can't fold
    // it.
    if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
      ShAmt = Sh->getZExtValue();
      if (ShAmt < 4 && isShifterOpProfitable(OffReg, ShOpcVal, ShAmt))
        OffReg = OffReg.getOperand(0);
      else {
        ShAmt = 0;
      }
    }
  }

  ShImm = CurDAG->getTargetConstant(ShAmt, SDLoc(N), MVT::i32);

  return true;
}

bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
                                                SDValue &OffImm) {
  // This *must* succeed since it's used for the irreplaceable ldrex and strex
  // instructions.
  Base = N;
  OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32);

  if (N.getOpcode() != ISD::ADD || !CurDAG->isBaseWithConstantOffset(N))
    return true;

  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!RHS)
    return true;

  uint32_t RHSC = (int)RHS->getZExtValue();
  if (RHSC > 1020 || RHSC % 4 != 0)
    return true;

  Base = N.getOperand(0);
  if (Base.getOpcode() == ISD::FrameIndex) {
    int FI = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(
        FI, TLI->getPointerTy(CurDAG->getDataLayout()));
  }

  OffImm = CurDAG->getTargetConstant(RHSC/4, SDLoc(N), MVT::i32);
  return true;
}
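
// For illustration: N = (add r1, 8) yields Base = r1 and OffImm = 2, since
// ldrex/strex encode the offset in words; an unaligned or out-of-range
// offset simply leaves Base = N with a zero offset, so matching never fails.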

//===--------------------------------------------------------------------===//

/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG, SDLoc dl) {
  return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, dl, MVT::i32);
}

SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return nullptr;

  EVT LoadedVT = LD->getMemoryVT();
  SDValue Offset, AMOpc;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (LoadedVT == MVT::i32 && isPre &&
      SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_PRE_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 && !isPre &&
      SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = ARM::LDR_POST_IMM;
    Match = true;
  } else if (LoadedVT == MVT::i32 &&
      SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
    Opcode = isPre ? ARM::LDR_PRE_REG : ARM::LDR_POST_REG;
    Match = true;

  } else if (LoadedVT == MVT::i16 &&
             SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
    Match = true;
    Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
      ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
      : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
  } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
    if (LD->getExtensionType() == ISD::SEXTLOAD) {
      if (SelectAddrMode3Offset(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
      }
    } else {
      if (isPre &&
          SelectAddrMode2OffsetImmPre(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_PRE_IMM;
      } else if (!isPre &&
                 SelectAddrMode2OffsetImm(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = ARM::LDRB_POST_IMM;
      } else if (SelectAddrMode2OffsetReg(N, LD->getOffset(), Offset, AMOpc)) {
        Match = true;
        Opcode = isPre ? ARM::LDRB_PRE_REG : ARM::LDRB_POST_REG;
      }
    }
  }

  if (Match) {
    if (Opcode == ARM::LDR_PRE_IMM || Opcode == ARM::LDRB_PRE_IMM) {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, AMOpc, getAL(CurDAG, SDLoc(N)),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                    MVT::i32, MVT::Other, Ops);
    } else {
      SDValue Chain = LD->getChain();
      SDValue Base = LD->getBasePtr();
      SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG, SDLoc(N)),
                       CurDAG->getRegister(0, MVT::i32), Chain };
      return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32,
                                    MVT::i32, MVT::Other, Ops);
    }
  }

  return nullptr;
}
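
// For illustration: a post-incremented i32 load (AM == ISD::POST_INC with a
// constant +4 offset) selects to LDR_POST_IMM, i.e. "ldr r0, [r1], #4", and
// the machine node's second result carries the updated base register.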

SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDNode *N) {
  LoadSDNode *LD = cast<LoadSDNode>(N);
  ISD::MemIndexedMode AM = LD->getAddressingMode();
  if (AM == ISD::UNINDEXED)
    return nullptr;

  EVT LoadedVT = LD->getMemoryVT();
  bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
  SDValue Offset;
  bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
  unsigned Opcode = 0;
  bool Match = false;
  if (SelectT2AddrModeImm8Offset(N, LD->getOffset(), Offset)) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    case MVT::i32:
      Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
      break;
    case MVT::i16:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
      else
        Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
      break;
    case MVT::i8:
    case MVT::i1:
      if (isSExtLd)
        Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
      else
        Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
      break;
    default:
      return nullptr;
    }
    Match = true;
  }

  if (Match) {
    SDValue Chain = LD->getChain();
    SDValue Base = LD->getBasePtr();
    SDValue Ops[]= { Base, Offset, getAL(CurDAG, SDLoc(N)),
                     CurDAG->getRegister(0, MVT::i32), Chain };
    return CurDAG->getMachineNode(Opcode, SDLoc(N), MVT::i32, MVT::i32,
                                  MVT::Other, Ops);
  }

  return nullptr;
}

/// \brief Form a GPRPair pseudo register from a pair of GPR regs.
SDNode *ARMDAGToDAGISel::createGPRPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}
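
// For illustration (a sketch of the emitted node, not a verbatim dump):
// createGPRPairNode(MVT::Untyped, R0, R1) produces
//   REG_SEQUENCE GPRPair, R0, gsub_0, R1, gsub_1
// tying the two GPRs into the even/odd register pair that instructions such
// as ldrexd/strexd require.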

/// \brief Form a D register from a pair of S registers.
SDNode *ARMDAGToDAGISel::createSRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::DPR_VFP2RegClassID, dl, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form a quad register from a pair of D registers.
SDNode *ARMDAGToDAGISel::createDRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QPRRegClassID, dl,
                                               MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive D registers from a pair of Q registers.
SDNode *ARMDAGToDAGISel::createQRegPairNode(EVT VT, SDValue V0, SDValue V1) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
                                               MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive S registers.
SDNode *ARMDAGToDAGISel::createQuadSRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass =
    CurDAG->getTargetConstant(ARM::QPR_VFP2RegClassID, dl, MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::ssub_0, dl, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::ssub_1, dl, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::ssub_2, dl, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::ssub_3, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive D registers.
SDNode *ARMDAGToDAGISel::createQuadDRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQPRRegClassID, dl,
                                               MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::dsub_0, dl, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::dsub_1, dl, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::dsub_2, dl, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::dsub_3, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// \brief Form 4 consecutive Q registers.
SDNode *ARMDAGToDAGISel::createQuadQRegsNode(EVT VT, SDValue V0, SDValue V1,
                                             SDValue V2, SDValue V3) {
  SDLoc dl(V0.getNode());
  SDValue RegClass = CurDAG->getTargetConstant(ARM::QQQQPRRegClassID, dl,
                                               MVT::i32);
  SDValue SubReg0 = CurDAG->getTargetConstant(ARM::qsub_0, dl, MVT::i32);
  SDValue SubReg1 = CurDAG->getTargetConstant(ARM::qsub_1, dl, MVT::i32);
  SDValue SubReg2 = CurDAG->getTargetConstant(ARM::qsub_2, dl, MVT::i32);
  SDValue SubReg3 = CurDAG->getTargetConstant(ARM::qsub_3, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1,
                                    V2, SubReg2, V3, SubReg3 };
  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, VT, Ops);
}

/// GetVLDSTAlign - Get the alignment (in bytes) for the alignment operand
/// of a NEON VLD or VST instruction. The supported values depend on the
/// number of registers being loaded.
SDValue ARMDAGToDAGISel::GetVLDSTAlign(SDValue Align, SDLoc dl,
                                       unsigned NumVecs, bool is64BitVector) {
  unsigned NumRegs = NumVecs;
  if (!is64BitVector && NumVecs < 3)
    NumRegs *= 2;

  unsigned Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
  if (Alignment >= 32 && NumRegs == 4)
    Alignment = 32;
  else if (Alignment >= 16 && (NumRegs == 2 || NumRegs == 4))
    Alignment = 16;
  else if (Alignment >= 8)
    Alignment = 8;
  else
    Alignment = 0;

  return CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
}
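
// For illustration: a vld2 of two Q registers (NumRegs == 4) whose address
// is 64-byte aligned gets its alignment operand clamped to 32 bytes, the
// largest these instructions support, while a D-register vld1 with only
// 4-byte alignment gets an alignment operand of 0 (none specified).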

static bool isVLDfixed(unsigned Opc)
{
  switch (Opc) {
  default: return false;
  case ARM::VLD1d8wb_fixed : return true;
  case ARM::VLD1d16wb_fixed : return true;
  case ARM::VLD1d64Qwb_fixed : return true;
  case ARM::VLD1d32wb_fixed : return true;
  case ARM::VLD1d64wb_fixed : return true;
  case ARM::VLD1d64TPseudoWB_fixed : return true;
  case ARM::VLD1d64QPseudoWB_fixed : return true;
  case ARM::VLD1q8wb_fixed : return true;
  case ARM::VLD1q16wb_fixed : return true;
  case ARM::VLD1q32wb_fixed : return true;
  case ARM::VLD1q64wb_fixed : return true;
  case ARM::VLD2d8wb_fixed : return true;
  case ARM::VLD2d16wb_fixed : return true;
  case ARM::VLD2d32wb_fixed : return true;
  case ARM::VLD2q8PseudoWB_fixed : return true;
  case ARM::VLD2q16PseudoWB_fixed : return true;
  case ARM::VLD2q32PseudoWB_fixed : return true;
  case ARM::VLD2DUPd8wb_fixed : return true;
  case ARM::VLD2DUPd16wb_fixed : return true;
  case ARM::VLD2DUPd32wb_fixed : return true;
  }
}

static bool isVSTfixed(unsigned Opc)
{
  switch (Opc) {
  default: return false;
  case ARM::VST1d8wb_fixed : return true;
  case ARM::VST1d16wb_fixed : return true;
  case ARM::VST1d32wb_fixed : return true;
  case ARM::VST1d64wb_fixed : return true;
  case ARM::VST1q8wb_fixed : return true;
  case ARM::VST1q16wb_fixed : return true;
  case ARM::VST1q32wb_fixed : return true;
  case ARM::VST1q64wb_fixed : return true;
  case ARM::VST1d64TPseudoWB_fixed : return true;
  case ARM::VST1d64QPseudoWB_fixed : return true;
  case ARM::VST2d8wb_fixed : return true;
  case ARM::VST2d16wb_fixed : return true;
  case ARM::VST2d32wb_fixed : return true;
  case ARM::VST2q8PseudoWB_fixed : return true;
  case ARM::VST2q16PseudoWB_fixed : return true;
  case ARM::VST2q32PseudoWB_fixed : return true;
  }
}

// Get the register stride update opcode of a VLD/VST instruction that
// is otherwise equivalent to the given fixed stride updating instruction.
static unsigned getVLDSTRegisterUpdateOpcode(unsigned Opc) {
  assert((isVLDfixed(Opc) || isVSTfixed(Opc))
         && "Incorrect fixed stride updating instruction.");
  switch (Opc) {
  default: break;
  case ARM::VLD1d8wb_fixed: return ARM::VLD1d8wb_register;
  case ARM::VLD1d16wb_fixed: return ARM::VLD1d16wb_register;
  case ARM::VLD1d32wb_fixed: return ARM::VLD1d32wb_register;
  case ARM::VLD1d64wb_fixed: return ARM::VLD1d64wb_register;
  case ARM::VLD1q8wb_fixed: return ARM::VLD1q8wb_register;
  case ARM::VLD1q16wb_fixed: return ARM::VLD1q16wb_register;
  case ARM::VLD1q32wb_fixed: return ARM::VLD1q32wb_register;
  case ARM::VLD1q64wb_fixed: return ARM::VLD1q64wb_register;
  case ARM::VLD1d64Twb_fixed: return ARM::VLD1d64Twb_register;
  case ARM::VLD1d64Qwb_fixed: return ARM::VLD1d64Qwb_register;
  case ARM::VLD1d64TPseudoWB_fixed: return ARM::VLD1d64TPseudoWB_register;
  case ARM::VLD1d64QPseudoWB_fixed: return ARM::VLD1d64QPseudoWB_register;

  case ARM::VST1d8wb_fixed: return ARM::VST1d8wb_register;
  case ARM::VST1d16wb_fixed: return ARM::VST1d16wb_register;
  case ARM::VST1d32wb_fixed: return ARM::VST1d32wb_register;
  case ARM::VST1d64wb_fixed: return ARM::VST1d64wb_register;
  case ARM::VST1q8wb_fixed: return ARM::VST1q8wb_register;
  case ARM::VST1q16wb_fixed: return ARM::VST1q16wb_register;
  case ARM::VST1q32wb_fixed: return ARM::VST1q32wb_register;
  case ARM::VST1q64wb_fixed: return ARM::VST1q64wb_register;
  case ARM::VST1d64TPseudoWB_fixed: return ARM::VST1d64TPseudoWB_register;
  case ARM::VST1d64QPseudoWB_fixed: return ARM::VST1d64QPseudoWB_register;

  case ARM::VLD2d8wb_fixed: return ARM::VLD2d8wb_register;
  case ARM::VLD2d16wb_fixed: return ARM::VLD2d16wb_register;
  case ARM::VLD2d32wb_fixed: return ARM::VLD2d32wb_register;
  case ARM::VLD2q8PseudoWB_fixed: return ARM::VLD2q8PseudoWB_register;
  case ARM::VLD2q16PseudoWB_fixed: return ARM::VLD2q16PseudoWB_register;
  case ARM::VLD2q32PseudoWB_fixed: return ARM::VLD2q32PseudoWB_register;

  case ARM::VST2d8wb_fixed: return ARM::VST2d8wb_register;
  case ARM::VST2d16wb_fixed: return ARM::VST2d16wb_register;
  case ARM::VST2d32wb_fixed: return ARM::VST2d32wb_register;
  case ARM::VST2q8PseudoWB_fixed: return ARM::VST2q8PseudoWB_register;
  case ARM::VST2q16PseudoWB_fixed: return ARM::VST2q16PseudoWB_register;
  case ARM::VST2q32PseudoWB_fixed: return ARM::VST2q32PseudoWB_register;

  case ARM::VLD2DUPd8wb_fixed: return ARM::VLD2DUPd8wb_register;
  case ARM::VLD2DUPd16wb_fixed: return ARM::VLD2DUPd16wb_register;
  case ARM::VLD2DUPd32wb_fixed: return ARM::VLD2DUPd32wb_register;
  }
  return Opc; // If not one we handle, return it unchanged.
}
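
// For illustration: the "_fixed" forms post-increment the base by the access
// size ("vld1.8 {d0}, [r0]!"), while the "_register" forms take an explicit
// stride register ("vld1.8 {d0}, [r0], r2"); SelectVLD/SelectVST below switch
// to the register form whenever the increment is not the natural constant.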

SDNode *ARMDAGToDAGISel::SelectVLD(SDNode *N, bool isUpdating, unsigned NumVecs,
                                   const uint16_t *DOpcodes,
                                   const uint16_t *QOpcodes0,
                                   const uint16_t *QOpcodes1) {
  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
  SDLoc dl(N);

  SDValue MemAddr, Align;
  unsigned AddrOpIdx = isUpdating ? 1 : 2;
  if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
    return nullptr;

  SDValue Chain = N->getOperand(0);
  EVT VT = N->getValueType(0);
  bool is64BitVector = VT.is64BitVector();
  Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);

  unsigned OpcodeIndex;
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("unhandled vld type");
    // Double-register operations:
  case MVT::v8i8:  OpcodeIndex = 0; break;
  case MVT::v4i16: OpcodeIndex = 1; break;
  case MVT::v2f32:
  case MVT::v2i32: OpcodeIndex = 2; break;
  case MVT::v1i64: OpcodeIndex = 3; break;
    // Quad-register operations:
  case MVT::v16i8: OpcodeIndex = 0; break;
  case MVT::v8i16: OpcodeIndex = 1; break;
  case MVT::v4f32:
  case MVT::v4i32: OpcodeIndex = 2; break;
  case MVT::v2f64:
  case MVT::v2i64: OpcodeIndex = 3;
    assert(NumVecs == 1 && "v2i64 type only supported for VLD1");
    break;
  }

  EVT ResTy;
  if (NumVecs == 1)
    ResTy = VT;
  else {
    unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
    if (!is64BitVector)
      ResTyElts *= 2;
    ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, ResTyElts);
  }
  std::vector<EVT> ResTys;
  ResTys.push_back(ResTy);
  if (isUpdating)
    ResTys.push_back(MVT::i32);
  ResTys.push_back(MVT::Other);

  SDValue Pred = getAL(CurDAG, dl);
  SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
  SDNode *VLd;
  SmallVector<SDValue, 7> Ops;

  // Double registers and VLD1/VLD2 quad registers are directly supported.
  if (is64BitVector || NumVecs <= 2) {
    unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
                    QOpcodes0[OpcodeIndex]);
    Ops.push_back(MemAddr);
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      // FIXME: VLD1/VLD2 fixed increment doesn't need Reg0. Remove the reg0
      // case entirely when the rest are updated to that form, too.
      if ((NumVecs <= 2) && !isa<ConstantSDNode>(Inc.getNode()))
        Opc = getVLDSTRegisterUpdateOpcode(Opc);
      // FIXME: We use a VLD1 for v1i64 even if the pseudo says vld2/3/4, so
      // check for that explicitly too. Horribly hacky, but temporary.
      if ((NumVecs > 2 && !isVLDfixed(Opc)) ||
          !isa<ConstantSDNode>(Inc.getNode()))
        Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
    }
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);

  } else {
    // Otherwise, quad registers are loaded with two separate instructions,
    // where one loads the even registers and the other loads the odd registers.
    EVT AddrTy = MemAddr.getValueType();

    // Load the even subregs. This is always an updating load, so that it
    // provides the address to the second load for the odd subregs.
    SDValue ImplDef =
      SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, ResTy), 0);
    const SDValue OpsA[] = { MemAddr, Align, Reg0, ImplDef, Pred, Reg0, Chain };
    SDNode *VLdA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
                                          ResTy, AddrTy, MVT::Other, OpsA);
    Chain = SDValue(VLdA, 2);

    // Load the odd subregs.
    Ops.push_back(SDValue(VLdA, 1));
    Ops.push_back(Align);
    if (isUpdating) {
      SDValue Inc = N->getOperand(AddrOpIdx + 1);
      assert(isa<ConstantSDNode>(Inc.getNode()) &&
             "only constant post-increment update allowed for VLD3/4");
      (void)Inc;
      Ops.push_back(Reg0);
    }
    Ops.push_back(SDValue(VLdA, 0));
    Ops.push_back(Pred);
    Ops.push_back(Reg0);
    Ops.push_back(Chain);
    VLd = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys, Ops);
  }

  // Transfer memoperands.
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);

  if (NumVecs == 1)
    return VLd;

  // Extract out the subregisters.
  SDValue SuperReg = SDValue(VLd, 0);
  assert(ARM::dsub_7 == ARM::dsub_0+7 &&
         ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
  unsigned Sub0 = (is64BitVector ? ARM::dsub_0 : ARM::qsub_0);
  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
    ReplaceUses(SDValue(N, Vec),
                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
  if (isUpdating)
    ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLd, 2));
  return nullptr;
}
1849 SDNode *ARMDAGToDAGISel::SelectVST(SDNode *N, bool isUpdating, unsigned NumVecs,
1850 const uint16_t *DOpcodes,
1851 const uint16_t *QOpcodes0,
1852 const uint16_t *QOpcodes1) {
1853 assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
1856 SDValue MemAddr, Align;
1857 unsigned AddrOpIdx = isUpdating ? 1 : 2;
1858 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
1859 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
1862 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1863 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
1865 SDValue Chain = N->getOperand(0);
1866 EVT VT = N->getOperand(Vec0Idx).getValueType();
1867 bool is64BitVector = VT.is64BitVector();
1868 Align = GetVLDSTAlign(Align, dl, NumVecs, is64BitVector);
1870 unsigned OpcodeIndex;
1871 switch (VT.getSimpleVT().SimpleTy) {
1872 default: llvm_unreachable("unhandled vst type");
1873 // Double-register operations:
1874 case MVT::v8i8: OpcodeIndex = 0; break;
1875 case MVT::v4i16: OpcodeIndex = 1; break;
1877 case MVT::v2i32: OpcodeIndex = 2; break;
1878 case MVT::v1i64: OpcodeIndex = 3; break;
1879 // Quad-register operations:
1880 case MVT::v16i8: OpcodeIndex = 0; break;
1881 case MVT::v8i16: OpcodeIndex = 1; break;
1883 case MVT::v4i32: OpcodeIndex = 2; break;
1885 case MVT::v2i64: OpcodeIndex = 3;
1886 assert(NumVecs == 1 && "v2i64 type only supported for VST1");
1890 std::vector<EVT> ResTys;
1892 ResTys.push_back(MVT::i32);
1893 ResTys.push_back(MVT::Other);
1895 SDValue Pred = getAL(CurDAG, dl);
1896 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
1897 SmallVector<SDValue, 7> Ops;
1899 // Double registers and VST1/VST2 quad registers are directly supported.
1900 if (is64BitVector || NumVecs <= 2) {
1903 SrcReg = N->getOperand(Vec0Idx);
1904 } else if (is64BitVector) {
1905 // Form a REG_SEQUENCE to force register allocation.
1906 SDValue V0 = N->getOperand(Vec0Idx + 0);
1907 SDValue V1 = N->getOperand(Vec0Idx + 1);
1909 SrcReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
1911 SDValue V2 = N->getOperand(Vec0Idx + 2);
1912 // If it's a vst3, form a quad D-register and leave the last part as an undef.
1914 SDValue V3 = (NumVecs == 3)
1915 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,dl,VT), 0)
1916 : N->getOperand(Vec0Idx + 3);
1917 SrcReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
1920 // Form a QQ register.
1921 SDValue Q0 = N->getOperand(Vec0Idx);
1922 SDValue Q1 = N->getOperand(Vec0Idx + 1);
1923 SrcReg = SDValue(createQRegPairNode(MVT::v4i64, Q0, Q1), 0);
1926 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
1927 QOpcodes0[OpcodeIndex]);
1928 Ops.push_back(MemAddr);
1929 Ops.push_back(Align);
1931 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1932 // FIXME: VST1/VST2 fixed increment doesn't need Reg0. Remove the reg0
1933 // case entirely when the rest are updated to that form, too.
1934 if (NumVecs <= 2 && !isa<ConstantSDNode>(Inc.getNode()))
1935 Opc = getVLDSTRegisterUpdateOpcode(Opc);
1936 // FIXME: We use a VST1 for v1i64 even if the pseudo says vst2/3/4, so
1937 // check for that explicitly too. Horribly hacky, but temporary.
1938 if (!isa<ConstantSDNode>(Inc.getNode()))
1940 else if (NumVecs > 2 && !isVSTfixed(Opc))
1941 Ops.push_back(Reg0);
1943 Ops.push_back(SrcReg);
1944 Ops.push_back(Pred);
1945 Ops.push_back(Reg0);
1946 Ops.push_back(Chain);
1947 SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
1949 // Transfer memoperands.
1950 cast<MachineSDNode>(VSt)->setMemRefs(MemOp, MemOp + 1);
1955 // Otherwise, quad registers are stored with two separate instructions,
1956 // where one stores the even registers and the other stores the odd registers.
1958 // Form the QQQQ REG_SEQUENCE.
1959 SDValue V0 = N->getOperand(Vec0Idx + 0);
1960 SDValue V1 = N->getOperand(Vec0Idx + 1);
1961 SDValue V2 = N->getOperand(Vec0Idx + 2);
1962 SDValue V3 = (NumVecs == 3)
1963 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
1964 : N->getOperand(Vec0Idx + 3);
1965 SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
1967 // Store the even D registers. This is always an updating store, so that it
1968 // provides the address to the second store for the odd subregs.
1969 const SDValue OpsA[] = { MemAddr, Align, Reg0, RegSeq, Pred, Reg0, Chain };
1970 SDNode *VStA = CurDAG->getMachineNode(QOpcodes0[OpcodeIndex], dl,
1971 MemAddr.getValueType(),
1973 cast<MachineSDNode>(VStA)->setMemRefs(MemOp, MemOp + 1);
1974 Chain = SDValue(VStA, 1);
1976 // Store the odd D registers.
1977 Ops.push_back(SDValue(VStA, 0));
1978 Ops.push_back(Align);
1980 SDValue Inc = N->getOperand(AddrOpIdx + 1);
1981 assert(isa<ConstantSDNode>(Inc.getNode()) &&
1982 "only constant post-increment update allowed for VST3/4");
1984 Ops.push_back(Reg0);
1986 Ops.push_back(RegSeq);
1987 Ops.push_back(Pred);
1988 Ops.push_back(Reg0);
1989 Ops.push_back(Chain);
1990 SDNode *VStB = CurDAG->getMachineNode(QOpcodes1[OpcodeIndex], dl, ResTys,
1992 cast<MachineSDNode>(VStB)->setMemRefs(MemOp, MemOp + 1);
1996 SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
1997 bool isUpdating, unsigned NumVecs,
1998 const uint16_t *DOpcodes,
1999 const uint16_t *QOpcodes) {
2000 assert(NumVecs >=2 && NumVecs <= 4 && "VLDSTLane NumVecs out-of-range");
2003 SDValue MemAddr, Align;
2004 unsigned AddrOpIdx = isUpdating ? 1 : 2;
2005 unsigned Vec0Idx = 3; // AddrOpIdx + (isUpdating ? 2 : 1)
2006 if (!SelectAddrMode6(N, N->getOperand(AddrOpIdx), MemAddr, Align))
2009 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2010 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2012 SDValue Chain = N->getOperand(0);
2013 unsigned Lane =
2014 cast<ConstantSDNode>(N->getOperand(Vec0Idx + NumVecs))->getZExtValue();
2015 EVT VT = N->getOperand(Vec0Idx).getValueType();
2016 bool is64BitVector = VT.is64BitVector();
2018 unsigned Alignment = 0;
2020 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2021 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2022 if (Alignment > NumBytes)
2023 Alignment = NumBytes;
2024 if (Alignment < 8 && Alignment < NumBytes)
2026 // Alignment must be a power of two; make sure of that.
2027 Alignment = (Alignment & -Alignment);
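// (x & -x keeps only the lowest set bit, e.g. an alignment of 12 becomes 4.)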
2031 Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
2033 unsigned OpcodeIndex;
2034 switch (VT.getSimpleVT().SimpleTy) {
2035 default: llvm_unreachable("unhandled vld/vst lane type");
2036 // Double-register operations:
2037 case MVT::v8i8: OpcodeIndex = 0; break;
2038 case MVT::v4i16: OpcodeIndex = 1; break;
2040 case MVT::v2i32: OpcodeIndex = 2; break;
2041 // Quad-register operations:
2042 case MVT::v8i16: OpcodeIndex = 0; break;
2044 case MVT::v4i32: OpcodeIndex = 1; break;
2047 std::vector<EVT> ResTys;
2049 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2052 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(),
2053 MVT::i64, ResTyElts));
2056 ResTys.push_back(MVT::i32);
2057 ResTys.push_back(MVT::Other);
2059 SDValue Pred = getAL(CurDAG, dl);
2060 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2062 SmallVector<SDValue, 8> Ops;
2063 Ops.push_back(MemAddr);
2064 Ops.push_back(Align);
2066 SDValue Inc = N->getOperand(AddrOpIdx + 1);
2067 Ops.push_back(isa<ConstantSDNode>(Inc.getNode()) ? Reg0 : Inc);
2071 SDValue V0 = N->getOperand(Vec0Idx + 0);
2072 SDValue V1 = N->getOperand(Vec0Idx + 1);
2075 SuperReg = SDValue(createDRegPairNode(MVT::v2i64, V0, V1), 0);
2077 SuperReg = SDValue(createQRegPairNode(MVT::v4i64, V0, V1), 0);
2079 SDValue V2 = N->getOperand(Vec0Idx + 2);
2080 SDValue V3 = (NumVecs == 3)
2081 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2082 : N->getOperand(Vec0Idx + 3);
2084 SuperReg = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2086 SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);
2088 Ops.push_back(SuperReg);
2089 Ops.push_back(getI32Imm(Lane, dl));
2090 Ops.push_back(Pred);
2091 Ops.push_back(Reg0);
2092 Ops.push_back(Chain);
2094 unsigned Opc = (is64BitVector ? DOpcodes[OpcodeIndex] :
2095 QOpcodes[OpcodeIndex]);
2096 SDNode *VLdLn = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2097 cast<MachineSDNode>(VLdLn)->setMemRefs(MemOp, MemOp + 1);
2101 // Extract the subregisters.
2102 SuperReg = SDValue(VLdLn, 0);
2103 assert(ARM::dsub_7 == ARM::dsub_0+7 &&
2104 ARM::qsub_3 == ARM::qsub_0+3 && "Unexpected subreg numbering");
2105 unsigned Sub0 = is64BitVector ? ARM::dsub_0 : ARM::qsub_0;
2106 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2107 ReplaceUses(SDValue(N, Vec),
2108 CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
2109 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdLn, 1));
2111 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdLn, 2));
2115 SDNode *ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool isUpdating,
2117 const uint16_t *Opcodes) {
2118 assert(NumVecs >=2 && NumVecs <= 4 && "VLDDup NumVecs out-of-range");
2121 SDValue MemAddr, Align;
2122 if (!SelectAddrMode6(N, N->getOperand(1), MemAddr, Align))
2125 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2126 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
2128 SDValue Chain = N->getOperand(0);
2129 EVT VT = N->getValueType(0);
2131 unsigned Alignment = 0;
2133 Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
2134 unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
2135 if (Alignment > NumBytes)
2136 Alignment = NumBytes;
2137 if (Alignment < 8 && Alignment < NumBytes)
2139 // Alignment must be a power of two; make sure of that.
2140 Alignment = (Alignment & -Alignment);
2144 Align = CurDAG->getTargetConstant(Alignment, dl, MVT::i32);
2146 unsigned OpcodeIndex;
2147 switch (VT.getSimpleVT().SimpleTy) {
2148 default: llvm_unreachable("unhandled vld-dup type");
2149 case MVT::v8i8: OpcodeIndex = 0; break;
2150 case MVT::v4i16: OpcodeIndex = 1; break;
2152 case MVT::v2i32: OpcodeIndex = 2; break;
2155 SDValue Pred = getAL(CurDAG, dl);
2156 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2158 unsigned Opc = Opcodes[OpcodeIndex];
2159 SmallVector<SDValue, 6> Ops;
2160 Ops.push_back(MemAddr);
2161 Ops.push_back(Align);
2163 // fixed-stride update instructions don't have an explicit writeback
2164 // operand. It's implicit in the opcode itself.
2165 SDValue Inc = N->getOperand(2);
2166 if (!isa<ConstantSDNode>(Inc.getNode()))
2168 // FIXME: VLD3 and VLD4 haven't been updated to that form yet.
2169 else if (NumVecs > 2)
2170 Ops.push_back(Reg0);
2172 Ops.push_back(Pred);
2173 Ops.push_back(Reg0);
2174 Ops.push_back(Chain);
2176 unsigned ResTyElts = (NumVecs == 3) ? 4 : NumVecs;
2177 std::vector<EVT> ResTys;
2178 ResTys.push_back(EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,ResTyElts));
2180 ResTys.push_back(MVT::i32);
2181 ResTys.push_back(MVT::Other);
2182 SDNode *VLdDup = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
2183 cast<MachineSDNode>(VLdDup)->setMemRefs(MemOp, MemOp + 1);
2184 SuperReg = SDValue(VLdDup, 0);
2186 // Extract the subregisters.
2187 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
2188 unsigned SubIdx = ARM::dsub_0;
2189 for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
2190 ReplaceUses(SDValue(N, Vec),
2191 CurDAG->getTargetExtractSubreg(SubIdx+Vec, dl, VT, SuperReg));
2192 ReplaceUses(SDValue(N, NumVecs), SDValue(VLdDup, 1));
2194 ReplaceUses(SDValue(N, NumVecs + 1), SDValue(VLdDup, 2));
2198 SDNode *ARMDAGToDAGISel::SelectVTBL(SDNode *N, bool IsExt, unsigned NumVecs,
2200 assert(NumVecs >= 2 && NumVecs <= 4 && "VTBL NumVecs out-of-range");
2202 EVT VT = N->getValueType(0);
2203 unsigned FirstTblReg = IsExt ? 2 : 1;
2205 // Form a REG_SEQUENCE to force register allocation.
2207 SDValue V0 = N->getOperand(FirstTblReg + 0);
2208 SDValue V1 = N->getOperand(FirstTblReg + 1);
2210 RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
2212 SDValue V2 = N->getOperand(FirstTblReg + 2);
2213 // If it's a vtbl3, form a quad D-register and leave the last part as an undef.
2215 SDValue V3 = (NumVecs == 3)
2216 ? SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, VT), 0)
2217 : N->getOperand(FirstTblReg + 3);
2218 RegSeq = SDValue(createQuadDRegsNode(MVT::v4i64, V0, V1, V2, V3), 0);
2221 SmallVector<SDValue, 6> Ops;
2223 Ops.push_back(N->getOperand(1));
2224 Ops.push_back(RegSeq);
2225 Ops.push_back(N->getOperand(FirstTblReg + NumVecs));
2226 Ops.push_back(getAL(CurDAG, dl)); // predicate
2227 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // predicate register
2228 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2231 SDNode *ARMDAGToDAGISel::SelectV6T2BitfieldExtractOp(SDNode *N,
2233 if (!Subtarget->hasV6T2Ops())
2236 unsigned Opc = isSigned
2237 ? (Subtarget->isThumb() ? ARM::t2SBFX : ARM::SBFX)
2238 : (Subtarget->isThumb() ? ARM::t2UBFX : ARM::UBFX);
2241 // For unsigned extracts, check for a shift right and mask
2242 unsigned And_imm = 0;
2243 if (N->getOpcode() == ISD::AND) {
2244 if (isOpcWithIntImmediate(N, ISD::AND, And_imm)) {
2246 // The immediate is a mask of the low bits iff imm & (imm+1) == 0
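// e.g. 0x00ff & 0x0100 == 0, whereas 0x0ff0 & 0x0ff1 != 0.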
2247 if (And_imm & (And_imm + 1))
2250 unsigned Srl_imm = 0;
2251 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL,
2253 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2255 // Note: The width operand is encoded as width-1.
2256 unsigned Width = countTrailingOnes(And_imm) - 1;
2257 unsigned LSB = Srl_imm;
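// e.g. for (and (srl x, 8), 0xff): LSB = 8 and Width = 7, i.e. an 8-bit field at bit 8.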
2259 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2261 if ((LSB + Width + 1) == N->getValueType(0).getSizeInBits()) {
2262 // It's cheaper to use a right shift to extract the top bits.
2263 if (Subtarget->isThumb()) {
2264 Opc = isSigned ? ARM::t2ASRri : ARM::t2LSRri;
2265 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2266 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
2267 getAL(CurDAG, dl), Reg0, Reg0 };
2268 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2271 // ARM models shift instructions as MOVsi with shifter operand.
2272 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(ISD::SRL);
2274 CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, LSB), dl,
2276 SDValue Ops[] = { N->getOperand(0).getOperand(0), ShOpc,
2277 getAL(CurDAG, dl), Reg0, Reg0 };
2278 return CurDAG->SelectNodeTo(N, ARM::MOVsi, MVT::i32, Ops);
2281 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2282 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
2283 CurDAG->getTargetConstant(Width, dl, MVT::i32),
2284 getAL(CurDAG, dl), Reg0 };
2285 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2291 // Otherwise, we're looking for a shift of a shift
2292 unsigned Shl_imm = 0;
2293 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SHL, Shl_imm)) {
2294 assert(Shl_imm > 0 && Shl_imm < 32 && "bad amount in shift node!");
2295 unsigned Srl_imm = 0;
2296 if (isInt32Immediate(N->getOperand(1), Srl_imm)) {
2297 assert(Srl_imm > 0 && Srl_imm < 32 && "bad amount in shift node!");
2298 // Note: The width operand is encoded as width-1.
2299 unsigned Width = 32 - Srl_imm - 1;
2300 int LSB = Srl_imm - Shl_imm;
2303 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2304 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2305 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
2306 CurDAG->getTargetConstant(Width, dl, MVT::i32),
2307 getAL(CurDAG, dl), Reg0 };
2308 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2312 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG) {
2313 unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2315 if (!isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRL, LSB) &&
2316 !isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::SRA, LSB))
2319 if (LSB + Width > 32)
2322 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2323 SDValue Ops[] = { N->getOperand(0).getOperand(0),
2324 CurDAG->getTargetConstant(LSB, dl, MVT::i32),
2325 CurDAG->getTargetConstant(Width - 1, dl, MVT::i32),
2326 getAL(CurDAG, dl), Reg0 };
2327 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2333 /// Target-specific DAG combining for ISD::XOR.
2334 /// Target-independent combining lowers SELECT_CC nodes of the form
2335 /// select_cc setg[ge] X, 0, X, -X
2336 /// select_cc setgt X, -1, X, -X
2337 /// select_cc setl[te] X, 0, -X, X
2338 /// select_cc setlt X, 1, -X, X
2339 /// which represent Integer ABS into:
2340 /// Y = sra (X, size(X)-1); xor (add (X, Y), Y)
2341 /// ARM instruction selection detects the latter and matches it to
2342 /// ARM::ABS or ARM::t2ABS machine node.
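/// For example, for a 32-bit X: Y = sra (X, 31), and abs(X) = xor (add (X, Y), Y).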
2343 SDNode *ARMDAGToDAGISel::SelectABSOp(SDNode *N){
2344 SDValue XORSrc0 = N->getOperand(0);
2345 SDValue XORSrc1 = N->getOperand(1);
2346 EVT VT = N->getValueType(0);
2348 if (Subtarget->isThumb1Only())
2351 if (XORSrc0.getOpcode() != ISD::ADD || XORSrc1.getOpcode() != ISD::SRA)
2354 SDValue ADDSrc0 = XORSrc0.getOperand(0);
2355 SDValue ADDSrc1 = XORSrc0.getOperand(1);
2356 SDValue SRASrc0 = XORSrc1.getOperand(0);
2357 SDValue SRASrc1 = XORSrc1.getOperand(1);
2358 ConstantSDNode *SRAConstant = dyn_cast<ConstantSDNode>(SRASrc1);
2359 EVT XType = SRASrc0.getValueType();
2360 unsigned Size = XType.getSizeInBits() - 1;
2362 if (ADDSrc1 == XORSrc1 && ADDSrc0 == SRASrc0 &&
2363 XType.isInteger() && SRAConstant != nullptr &&
2364 Size == SRAConstant->getZExtValue()) {
2365 unsigned Opcode = Subtarget->isThumb2() ? ARM::t2ABS : ARM::ABS;
2366 return CurDAG->SelectNodeTo(N, Opcode, VT, ADDSrc0);
2372 SDNode *ARMDAGToDAGISel::SelectConcatVector(SDNode *N) {
2373 // The only time a CONCAT_VECTORS operation can have legal types is when
2374 // two 64-bit vectors are concatenated to a 128-bit vector.
2375 EVT VT = N->getValueType(0);
2376 if (!VT.is128BitVector() || N->getNumOperands() != 2)
2377 llvm_unreachable("unexpected CONCAT_VECTORS");
2378 return createDRegPairNode(VT, N->getOperand(0), N->getOperand(1));
2381 SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
2384 if (N->isMachineOpcode()) {
2386 return nullptr; // Already selected.
2389 switch (N->getOpcode()) {
2391 case ISD::WRITE_REGISTER: {
2392 SDNode *ResNode = SelectWriteRegister(N);
2397 case ISD::READ_REGISTER: {
2398 SDNode *ResNode = SelectReadRegister(N);
2403 case ISD::INLINEASM: {
2404 SDNode *ResNode = SelectInlineAsm(N);
2410 // Select special operations if the XOR node forms an integer ABS pattern.
2411 SDNode *ResNode = SelectABSOp(N);
2414 // Other cases are autogenerated.
2417 case ISD::Constant: {
2418 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
2420 if (Subtarget->useMovt(*MF))
2421 // Thumb2-aware targets have the MOVT instruction, so all immediates can
2422 // be done with MOV + MOVT, at worst.
2425 if (Subtarget->isThumb()) {
2426 UseCP = (Val > 255 && // MOV
2427 ~Val > 255 && // MOV + MVN
2428 !ARM_AM::isThumbImmShiftedVal(Val) && // MOV + LSL
2429 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
2431 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
2432 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
2433 !ARM_AM::isSOImmTwoPartVal(Val) && // two instrs.
2434 !(Subtarget->hasV6T2Ops() && Val <= 0xffff)); // MOVW
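// If none of these cheap forms apply, materialize the constant with a load
// from the constant pool (tLDRpci / LDRcp below).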
2438 SDValue CPIdx = CurDAG->getTargetConstantPool(
2439 ConstantInt::get(Type::getInt32Ty(*CurDAG->getContext()), Val),
2440 TLI->getPointerTy(CurDAG->getDataLayout()));
2443 if (Subtarget->isThumb()) {
2444 SDValue Pred = getAL(CurDAG, dl);
2445 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2446 SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
2447 ResNode = CurDAG->getMachineNode(ARM::tLDRpci, dl, MVT::i32, MVT::Other,
2452 CurDAG->getTargetConstant(0, dl, MVT::i32),
2454 CurDAG->getRegister(0, MVT::i32),
2455 CurDAG->getEntryNode()
2457 ResNode=CurDAG->getMachineNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
2460 ReplaceUses(SDValue(N, 0), SDValue(ResNode, 0));
2464 // Other cases are autogenerated.
2467 case ISD::FrameIndex: {
2468 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
2469 int FI = cast<FrameIndexSDNode>(N)->getIndex();
2470 SDValue TFI = CurDAG->getTargetFrameIndex(
2471 FI, TLI->getPointerTy(CurDAG->getDataLayout()));
2472 if (Subtarget->isThumb1Only()) {
2473 // Set the alignment of the frame object to 4, to avoid having to generate
2474 // more than one ADD
2475 MachineFrameInfo *MFI = MF->getFrameInfo();
2476 if (MFI->getObjectAlignment(FI) < 4)
2477 MFI->setObjectAlignment(FI, 4);
2478 return CurDAG->SelectNodeTo(N, ARM::tADDframe, MVT::i32, TFI,
2479 CurDAG->getTargetConstant(0, dl, MVT::i32));
2481 unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
2482 ARM::t2ADDri : ARM::ADDri);
2483 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, dl, MVT::i32),
2484 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
2485 CurDAG->getRegister(0, MVT::i32) };
2486 return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops);
2490 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2493 case ISD::SIGN_EXTEND_INREG:
2495 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, true))
2499 if (Subtarget->isThumb1Only())
2501 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
2502 unsigned RHSV = C->getZExtValue();
2504 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
2505 unsigned ShImm = Log2_32(RHSV-1);
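// e.g. x*9 (== 2^3+1) is selected as add x, x, lsl #3, i.e. x + (x << 3).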
2508 SDValue V = N->getOperand(0);
2509 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2510 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
2511 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2512 if (Subtarget->isThumb()) {
2513 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
2514 return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops);
2516 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
2518 return CurDAG->SelectNodeTo(N, ARM::ADDrsi, MVT::i32, Ops);
2521 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
2522 unsigned ShImm = Log2_32(RHSV+1);
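// e.g. x*7 (== 2^3-1) is selected as rsb x, x, lsl #3, i.e. (x << 3) - x.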
2525 SDValue V = N->getOperand(0);
2526 ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
2527 SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, dl, MVT::i32);
2528 SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
2529 if (Subtarget->isThumb()) {
2530 SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG, dl), Reg0, Reg0 };
2531 return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops);
2533 SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG, dl), Reg0,
2535 return CurDAG->SelectNodeTo(N, ARM::RSBrsi, MVT::i32, Ops);
2541 // Check for unsigned bitfield extract
2542 if (SDNode *I = SelectV6T2BitfieldExtractOp(N, false))
2545 // (and (or x, c2), c1) and top 16-bits of c1 and c2 match, lower 16-bits
2546 // of c1 are 0xffff, and lower 16-bits of c2 are 0. That is, the top 16-bits
2547 // are entirely contributed by c2 and lower 16-bits are entirely contributed
2548 // by x. That's equal to (or (and x, 0xffff), (and c1, 0xffff0000)).
2549 // Select it to: "movt x, ((c2 & 0xffff0000) >> 16)".
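// For example, with c1 == 0xabcdffff and c2 == 0xabcd0000 this becomes "movt x, #0xabcd".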
2550 EVT VT = N->getValueType(0);
2553 unsigned Opc = (Subtarget->isThumb() && Subtarget->hasThumb2())
2555 : (Subtarget->hasV6T2Ops() ? ARM::MOVTi16 : 0);
2558 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
2559 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
2562 if (N0.getOpcode() == ISD::OR && N0.getNode()->hasOneUse()) {
2563 SDValue N2 = N0.getOperand(1);
2564 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
2567 unsigned N1CVal = N1C->getZExtValue();
2568 unsigned N2CVal = N2C->getZExtValue();
2569 if ((N1CVal & 0xffff0000U) == (N2CVal & 0xffff0000U) &&
2570 (N1CVal & 0xffffU) == 0xffffU &&
2571 (N2CVal & 0xffffU) == 0x0U) {
2572 SDValue Imm16 = CurDAG->getTargetConstant((N2CVal & 0xFFFF0000U) >> 16,
2574 SDValue Ops[] = { N0.getOperand(0), Imm16,
2575 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
2576 return CurDAG->getMachineNode(Opc, dl, VT, Ops);
2581 case ARMISD::VMOVRRD:
2582 return CurDAG->getMachineNode(ARM::VMOVRRD, dl, MVT::i32, MVT::i32,
2583 N->getOperand(0), getAL(CurDAG, dl),
2584 CurDAG->getRegister(0, MVT::i32));
2585 case ISD::UMUL_LOHI: {
2586 if (Subtarget->isThumb1Only())
2588 if (Subtarget->isThumb()) {
2589 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2590 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
2591 return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops);
2593 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2594 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
2595 CurDAG->getRegister(0, MVT::i32) };
2596 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2597 ARM::UMULL : ARM::UMULLv5,
2598 dl, MVT::i32, MVT::i32, Ops);
2601 case ISD::SMUL_LOHI: {
2602 if (Subtarget->isThumb1Only())
2604 if (Subtarget->isThumb()) {
2605 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2606 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32) };
2607 return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops);
2609 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
2610 getAL(CurDAG, dl), CurDAG->getRegister(0, MVT::i32),
2611 CurDAG->getRegister(0, MVT::i32) };
2612 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2613 ARM::SMULL : ARM::SMULLv5,
2614 dl, MVT::i32, MVT::i32, Ops);
2617 case ARMISD::UMLAL:{
2618 if (Subtarget->isThumb()) {
2619 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2620 N->getOperand(3), getAL(CurDAG, dl),
2621 CurDAG->getRegister(0, MVT::i32)};
2622 return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops);
2624 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2625 N->getOperand(3), getAL(CurDAG, dl),
2626 CurDAG->getRegister(0, MVT::i32),
2627 CurDAG->getRegister(0, MVT::i32) };
2628 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2629 ARM::UMLAL : ARM::UMLALv5,
2630 dl, MVT::i32, MVT::i32, Ops);
2633 case ARMISD::SMLAL:{
2634 if (Subtarget->isThumb()) {
2635 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2636 N->getOperand(3), getAL(CurDAG, dl),
2637 CurDAG->getRegister(0, MVT::i32)};
2638 return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops);
2640 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
2641 N->getOperand(3), getAL(CurDAG, dl),
2642 CurDAG->getRegister(0, MVT::i32),
2643 CurDAG->getRegister(0, MVT::i32) };
2644 return CurDAG->getMachineNode(Subtarget->hasV6Ops() ?
2645 ARM::SMLAL : ARM::SMLALv5,
2646 dl, MVT::i32, MVT::i32, Ops);
2650 SDNode *ResNode = nullptr;
2651 if (Subtarget->isThumb() && Subtarget->hasThumb2())
2652 ResNode = SelectT2IndexedLoad(N);
2654 ResNode = SelectARMIndexedLoad(N);
2657 // Other cases are autogenerated.
2660 case ARMISD::BRCOND: {
2661 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2662 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2663 // Pattern complexity = 6 cost = 1 size = 0
2665 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2666 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
2667 // Pattern complexity = 6 cost = 1 size = 0
2669 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
2670 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
2671 // Pattern complexity = 6 cost = 1 size = 0
2673 unsigned Opc = Subtarget->isThumb() ?
2674 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
2675 SDValue Chain = N->getOperand(0);
2676 SDValue N1 = N->getOperand(1);
2677 SDValue N2 = N->getOperand(2);
2678 SDValue N3 = N->getOperand(3);
2679 SDValue InFlag = N->getOperand(4);
2680 assert(N1.getOpcode() == ISD::BasicBlock);
2681 assert(N2.getOpcode() == ISD::Constant);
2682 assert(N3.getOpcode() == ISD::Register);
2684 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
2685 cast<ConstantSDNode>(N2)->getZExtValue()), dl,
2687 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
2688 SDNode *ResNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
2690 Chain = SDValue(ResNode, 0);
2691 if (N->getNumValues() == 2) {
2692 InFlag = SDValue(ResNode, 1);
2693 ReplaceUses(SDValue(N, 1), InFlag);
2695 ReplaceUses(SDValue(N, 0),
2696 SDValue(Chain.getNode(), Chain.getResNo()));
2699 case ARMISD::VZIP: {
2701 EVT VT = N->getValueType(0);
2702 switch (VT.getSimpleVT().SimpleTy) {
2703 default: return nullptr;
2704 case MVT::v8i8: Opc = ARM::VZIPd8; break;
2705 case MVT::v4i16: Opc = ARM::VZIPd16; break;
2707 // vzip.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2708 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2709 case MVT::v16i8: Opc = ARM::VZIPq8; break;
2710 case MVT::v8i16: Opc = ARM::VZIPq16; break;
2712 case MVT::v4i32: Opc = ARM::VZIPq32; break;
2714 SDValue Pred = getAL(CurDAG, dl);
2715 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2716 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2717 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2719 case ARMISD::VUZP: {
2721 EVT VT = N->getValueType(0);
2722 switch (VT.getSimpleVT().SimpleTy) {
2723 default: return nullptr;
2724 case MVT::v8i8: Opc = ARM::VUZPd8; break;
2725 case MVT::v4i16: Opc = ARM::VUZPd16; break;
2727 // vuzp.32 Dd, Dm is a pseudo-instruction expanded to vtrn.32 Dd, Dm.
2728 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2729 case MVT::v16i8: Opc = ARM::VUZPq8; break;
2730 case MVT::v8i16: Opc = ARM::VUZPq16; break;
2732 case MVT::v4i32: Opc = ARM::VUZPq32; break;
2734 SDValue Pred = getAL(CurDAG, dl);
2735 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2736 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2737 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2739 case ARMISD::VTRN: {
2741 EVT VT = N->getValueType(0);
2742 switch (VT.getSimpleVT().SimpleTy) {
2743 default: return nullptr;
2744 case MVT::v8i8: Opc = ARM::VTRNd8; break;
2745 case MVT::v4i16: Opc = ARM::VTRNd16; break;
2747 case MVT::v2i32: Opc = ARM::VTRNd32; break;
2748 case MVT::v16i8: Opc = ARM::VTRNq8; break;
2749 case MVT::v8i16: Opc = ARM::VTRNq16; break;
2751 case MVT::v4i32: Opc = ARM::VTRNq32; break;
2753 SDValue Pred = getAL(CurDAG, dl);
2754 SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
2755 SDValue Ops[] = { N->getOperand(0), N->getOperand(1), Pred, PredReg };
2756 return CurDAG->getMachineNode(Opc, dl, VT, VT, Ops);
2758 case ARMISD::BUILD_VECTOR: {
2759 EVT VecVT = N->getValueType(0);
2760 EVT EltVT = VecVT.getVectorElementType();
2761 unsigned NumElts = VecVT.getVectorNumElements();
2762 if (EltVT == MVT::f64) {
2763 assert(NumElts == 2 && "unexpected type for BUILD_VECTOR");
2764 return createDRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2766 assert(EltVT == MVT::f32 && "unexpected type for BUILD_VECTOR");
2768 return createSRegPairNode(VecVT, N->getOperand(0), N->getOperand(1));
2769 assert(NumElts == 4 && "unexpected type for BUILD_VECTOR");
2770 return createQuadSRegsNode(VecVT, N->getOperand(0), N->getOperand(1),
2771 N->getOperand(2), N->getOperand(3));
2774 case ARMISD::VLD2DUP: {
2775 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8, ARM::VLD2DUPd16,
2777 return SelectVLDDup(N, false, 2, Opcodes);
2780 case ARMISD::VLD3DUP: {
2781 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo,
2782 ARM::VLD3DUPd16Pseudo,
2783 ARM::VLD3DUPd32Pseudo };
2784 return SelectVLDDup(N, false, 3, Opcodes);
2787 case ARMISD::VLD4DUP: {
2788 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo,
2789 ARM::VLD4DUPd16Pseudo,
2790 ARM::VLD4DUPd32Pseudo };
2791 return SelectVLDDup(N, false, 4, Opcodes);
2794 case ARMISD::VLD2DUP_UPD: {
2795 static const uint16_t Opcodes[] = { ARM::VLD2DUPd8wb_fixed,
2796 ARM::VLD2DUPd16wb_fixed,
2797 ARM::VLD2DUPd32wb_fixed };
2798 return SelectVLDDup(N, true, 2, Opcodes);
2801 case ARMISD::VLD3DUP_UPD: {
2802 static const uint16_t Opcodes[] = { ARM::VLD3DUPd8Pseudo_UPD,
2803 ARM::VLD3DUPd16Pseudo_UPD,
2804 ARM::VLD3DUPd32Pseudo_UPD };
2805 return SelectVLDDup(N, true, 3, Opcodes);
2808 case ARMISD::VLD4DUP_UPD: {
2809 static const uint16_t Opcodes[] = { ARM::VLD4DUPd8Pseudo_UPD,
2810 ARM::VLD4DUPd16Pseudo_UPD,
2811 ARM::VLD4DUPd32Pseudo_UPD };
2812 return SelectVLDDup(N, true, 4, Opcodes);
2815 case ARMISD::VLD1_UPD: {
2816 static const uint16_t DOpcodes[] = { ARM::VLD1d8wb_fixed,
2817 ARM::VLD1d16wb_fixed,
2818 ARM::VLD1d32wb_fixed,
2819 ARM::VLD1d64wb_fixed };
2820 static const uint16_t QOpcodes[] = { ARM::VLD1q8wb_fixed,
2821 ARM::VLD1q16wb_fixed,
2822 ARM::VLD1q32wb_fixed,
2823 ARM::VLD1q64wb_fixed };
2824 return SelectVLD(N, true, 1, DOpcodes, QOpcodes, nullptr);
2827 case ARMISD::VLD2_UPD: {
2828 static const uint16_t DOpcodes[] = { ARM::VLD2d8wb_fixed,
2829 ARM::VLD2d16wb_fixed,
2830 ARM::VLD2d32wb_fixed,
2831 ARM::VLD1q64wb_fixed};
2832 static const uint16_t QOpcodes[] = { ARM::VLD2q8PseudoWB_fixed,
2833 ARM::VLD2q16PseudoWB_fixed,
2834 ARM::VLD2q32PseudoWB_fixed };
2835 return SelectVLD(N, true, 2, DOpcodes, QOpcodes, nullptr);
2838 case ARMISD::VLD3_UPD: {
2839 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo_UPD,
2840 ARM::VLD3d16Pseudo_UPD,
2841 ARM::VLD3d32Pseudo_UPD,
2842 ARM::VLD1d64TPseudoWB_fixed};
2843 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
2844 ARM::VLD3q16Pseudo_UPD,
2845 ARM::VLD3q32Pseudo_UPD };
2846 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo_UPD,
2847 ARM::VLD3q16oddPseudo_UPD,
2848 ARM::VLD3q32oddPseudo_UPD };
2849 return SelectVLD(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2852 case ARMISD::VLD4_UPD: {
2853 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo_UPD,
2854 ARM::VLD4d16Pseudo_UPD,
2855 ARM::VLD4d32Pseudo_UPD,
2856 ARM::VLD1d64QPseudoWB_fixed};
2857 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
2858 ARM::VLD4q16Pseudo_UPD,
2859 ARM::VLD4q32Pseudo_UPD };
2860 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo_UPD,
2861 ARM::VLD4q16oddPseudo_UPD,
2862 ARM::VLD4q32oddPseudo_UPD };
2863 return SelectVLD(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2866 case ARMISD::VLD2LN_UPD: {
2867 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo_UPD,
2868 ARM::VLD2LNd16Pseudo_UPD,
2869 ARM::VLD2LNd32Pseudo_UPD };
2870 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo_UPD,
2871 ARM::VLD2LNq32Pseudo_UPD };
2872 return SelectVLDSTLane(N, true, true, 2, DOpcodes, QOpcodes);
2875 case ARMISD::VLD3LN_UPD: {
2876 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo_UPD,
2877 ARM::VLD3LNd16Pseudo_UPD,
2878 ARM::VLD3LNd32Pseudo_UPD };
2879 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo_UPD,
2880 ARM::VLD3LNq32Pseudo_UPD };
2881 return SelectVLDSTLane(N, true, true, 3, DOpcodes, QOpcodes);
2884 case ARMISD::VLD4LN_UPD: {
2885 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo_UPD,
2886 ARM::VLD4LNd16Pseudo_UPD,
2887 ARM::VLD4LNd32Pseudo_UPD };
2888 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo_UPD,
2889 ARM::VLD4LNq32Pseudo_UPD };
2890 return SelectVLDSTLane(N, true, true, 4, DOpcodes, QOpcodes);
2893 case ARMISD::VST1_UPD: {
2894 static const uint16_t DOpcodes[] = { ARM::VST1d8wb_fixed,
2895 ARM::VST1d16wb_fixed,
2896 ARM::VST1d32wb_fixed,
2897 ARM::VST1d64wb_fixed };
2898 static const uint16_t QOpcodes[] = { ARM::VST1q8wb_fixed,
2899 ARM::VST1q16wb_fixed,
2900 ARM::VST1q32wb_fixed,
2901 ARM::VST1q64wb_fixed };
2902 return SelectVST(N, true, 1, DOpcodes, QOpcodes, nullptr);
2905 case ARMISD::VST2_UPD: {
2906 static const uint16_t DOpcodes[] = { ARM::VST2d8wb_fixed,
2907 ARM::VST2d16wb_fixed,
2908 ARM::VST2d32wb_fixed,
2909 ARM::VST1q64wb_fixed};
2910 static const uint16_t QOpcodes[] = { ARM::VST2q8PseudoWB_fixed,
2911 ARM::VST2q16PseudoWB_fixed,
2912 ARM::VST2q32PseudoWB_fixed };
2913 return SelectVST(N, true, 2, DOpcodes, QOpcodes, nullptr);
2916 case ARMISD::VST3_UPD: {
2917 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo_UPD,
2918 ARM::VST3d16Pseudo_UPD,
2919 ARM::VST3d32Pseudo_UPD,
2920 ARM::VST1d64TPseudoWB_fixed};
2921 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
2922 ARM::VST3q16Pseudo_UPD,
2923 ARM::VST3q32Pseudo_UPD };
2924 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo_UPD,
2925 ARM::VST3q16oddPseudo_UPD,
2926 ARM::VST3q32oddPseudo_UPD };
2927 return SelectVST(N, true, 3, DOpcodes, QOpcodes0, QOpcodes1);
2930 case ARMISD::VST4_UPD: {
2931 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo_UPD,
2932 ARM::VST4d16Pseudo_UPD,
2933 ARM::VST4d32Pseudo_UPD,
2934 ARM::VST1d64QPseudoWB_fixed};
2935 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
2936 ARM::VST4q16Pseudo_UPD,
2937 ARM::VST4q32Pseudo_UPD };
2938 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo_UPD,
2939 ARM::VST4q16oddPseudo_UPD,
2940 ARM::VST4q32oddPseudo_UPD };
2941 return SelectVST(N, true, 4, DOpcodes, QOpcodes0, QOpcodes1);
2944 case ARMISD::VST2LN_UPD: {
2945 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo_UPD,
2946 ARM::VST2LNd16Pseudo_UPD,
2947 ARM::VST2LNd32Pseudo_UPD };
2948 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo_UPD,
2949 ARM::VST2LNq32Pseudo_UPD };
2950 return SelectVLDSTLane(N, false, true, 2, DOpcodes, QOpcodes);
2953 case ARMISD::VST3LN_UPD: {
2954 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo_UPD,
2955 ARM::VST3LNd16Pseudo_UPD,
2956 ARM::VST3LNd32Pseudo_UPD };
2957 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo_UPD,
2958 ARM::VST3LNq32Pseudo_UPD };
2959 return SelectVLDSTLane(N, false, true, 3, DOpcodes, QOpcodes);
2962 case ARMISD::VST4LN_UPD: {
2963 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo_UPD,
2964 ARM::VST4LNd16Pseudo_UPD,
2965 ARM::VST4LNd32Pseudo_UPD };
2966 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo_UPD,
2967 ARM::VST4LNq32Pseudo_UPD };
2968 return SelectVLDSTLane(N, false, true, 4, DOpcodes, QOpcodes);
2971 case ISD::INTRINSIC_VOID:
2972 case ISD::INTRINSIC_W_CHAIN: {
2973 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2978 case Intrinsic::arm_ldaexd:
2979 case Intrinsic::arm_ldrexd: {
2981 SDValue Chain = N->getOperand(0);
2982 SDValue MemAddr = N->getOperand(2);
2983 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
2985 bool IsAcquire = IntNo == Intrinsic::arm_ldaexd;
2986 unsigned NewOpc = isThumb ? (IsAcquire ? ARM::t2LDAEXD : ARM::t2LDREXD)
2987 : (IsAcquire ? ARM::LDAEXD : ARM::LDREXD);
2989 // arm_ldrexd returns an i64 value in {i32, i32}.
2990 std::vector<EVT> ResTys;
2991 if (isThumb) {
2992 ResTys.push_back(MVT::i32);
2993 ResTys.push_back(MVT::i32);
2994 } else
2995 ResTys.push_back(MVT::Untyped);
2996 ResTys.push_back(MVT::Other);
2998 // Place arguments in the right order.
2999 SmallVector<SDValue, 7> Ops;
3000 Ops.push_back(MemAddr);
3001 Ops.push_back(getAL(CurDAG, dl));
3002 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3003 Ops.push_back(Chain);
3004 SDNode *Ld = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3005 // Transfer memoperands.
3006 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3007 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3008 cast<MachineSDNode>(Ld)->setMemRefs(MemOp, MemOp + 1);
3011 SDValue OutChain = isThumb ? SDValue(Ld, 2) : SDValue(Ld, 1);
3012 if (!SDValue(N, 0).use_empty()) {
3015 Result = SDValue(Ld, 0);
3018 CurDAG->getTargetConstant(ARM::gsub_0, dl, MVT::i32);
3019 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3020 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3021 Result = SDValue(ResNode,0);
3023 ReplaceUses(SDValue(N, 0), Result);
3025 if (!SDValue(N, 1).use_empty()) {
3028 Result = SDValue(Ld, 1);
3031 CurDAG->getTargetConstant(ARM::gsub_1, dl, MVT::i32);
3032 SDNode *ResNode = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
3033 dl, MVT::i32, SDValue(Ld, 0), SubRegIdx);
3034 Result = SDValue(ResNode,0);
3036 ReplaceUses(SDValue(N, 1), Result);
3038 ReplaceUses(SDValue(N, 2), OutChain);
3041 case Intrinsic::arm_stlexd:
3042 case Intrinsic::arm_strexd: {
3044 SDValue Chain = N->getOperand(0);
3045 SDValue Val0 = N->getOperand(2);
3046 SDValue Val1 = N->getOperand(3);
3047 SDValue MemAddr = N->getOperand(4);
3049 // Store exclusive double returns an i32 value which is the return status
3050 // of the issued store.
3051 const EVT ResTys[] = {MVT::i32, MVT::Other};
3053 bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
3054 // Place arguments in the right order.
3055 SmallVector<SDValue, 7> Ops;
3056 if (isThumb) {
3057 Ops.push_back(Val0);
3058 Ops.push_back(Val1);
3059 } else
3060 // arm_strexd uses GPRPair.
3061 Ops.push_back(SDValue(createGPRPairNode(MVT::Untyped, Val0, Val1), 0));
3062 Ops.push_back(MemAddr);
3063 Ops.push_back(getAL(CurDAG, dl));
3064 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3065 Ops.push_back(Chain);
3067 bool IsRelease = IntNo == Intrinsic::arm_stlexd;
3068 unsigned NewOpc = isThumb ? (IsRelease ? ARM::t2STLEXD : ARM::t2STREXD)
3069 : (IsRelease ? ARM::STLEXD : ARM::STREXD);
3071 SDNode *St = CurDAG->getMachineNode(NewOpc, dl, ResTys, Ops);
3072 // Transfer memoperands.
3073 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
3074 MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
3075 cast<MachineSDNode>(St)->setMemRefs(MemOp, MemOp + 1);
3080 case Intrinsic::arm_neon_vld1: {
3081 static const uint16_t DOpcodes[] = { ARM::VLD1d8, ARM::VLD1d16,
3082 ARM::VLD1d32, ARM::VLD1d64 };
3083 static const uint16_t QOpcodes[] = { ARM::VLD1q8, ARM::VLD1q16,
3084 ARM::VLD1q32, ARM::VLD1q64};
3085 return SelectVLD(N, false, 1, DOpcodes, QOpcodes, nullptr);
3088 case Intrinsic::arm_neon_vld2: {
3089 static const uint16_t DOpcodes[] = { ARM::VLD2d8, ARM::VLD2d16,
3090 ARM::VLD2d32, ARM::VLD1q64 };
3091 static const uint16_t QOpcodes[] = { ARM::VLD2q8Pseudo, ARM::VLD2q16Pseudo,
3092 ARM::VLD2q32Pseudo };
3093 return SelectVLD(N, false, 2, DOpcodes, QOpcodes, nullptr);
3096 case Intrinsic::arm_neon_vld3: {
3097 static const uint16_t DOpcodes[] = { ARM::VLD3d8Pseudo,
3100 ARM::VLD1d64TPseudo };
3101 static const uint16_t QOpcodes0[] = { ARM::VLD3q8Pseudo_UPD,
3102 ARM::VLD3q16Pseudo_UPD,
3103 ARM::VLD3q32Pseudo_UPD };
3104 static const uint16_t QOpcodes1[] = { ARM::VLD3q8oddPseudo,
3105 ARM::VLD3q16oddPseudo,
3106 ARM::VLD3q32oddPseudo };
3107 return SelectVLD(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3110 case Intrinsic::arm_neon_vld4: {
3111 static const uint16_t DOpcodes[] = { ARM::VLD4d8Pseudo,
3114 ARM::VLD1d64QPseudo };
3115 static const uint16_t QOpcodes0[] = { ARM::VLD4q8Pseudo_UPD,
3116 ARM::VLD4q16Pseudo_UPD,
3117 ARM::VLD4q32Pseudo_UPD };
3118 static const uint16_t QOpcodes1[] = { ARM::VLD4q8oddPseudo,
3119 ARM::VLD4q16oddPseudo,
3120 ARM::VLD4q32oddPseudo };
3121 return SelectVLD(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3124 case Intrinsic::arm_neon_vld2lane: {
3125 static const uint16_t DOpcodes[] = { ARM::VLD2LNd8Pseudo,
3126 ARM::VLD2LNd16Pseudo,
3127 ARM::VLD2LNd32Pseudo };
3128 static const uint16_t QOpcodes[] = { ARM::VLD2LNq16Pseudo,
3129 ARM::VLD2LNq32Pseudo };
3130 return SelectVLDSTLane(N, true, false, 2, DOpcodes, QOpcodes);
3133 case Intrinsic::arm_neon_vld3lane: {
3134 static const uint16_t DOpcodes[] = { ARM::VLD3LNd8Pseudo,
3135 ARM::VLD3LNd16Pseudo,
3136 ARM::VLD3LNd32Pseudo };
3137 static const uint16_t QOpcodes[] = { ARM::VLD3LNq16Pseudo,
3138 ARM::VLD3LNq32Pseudo };
3139 return SelectVLDSTLane(N, true, false, 3, DOpcodes, QOpcodes);
3142 case Intrinsic::arm_neon_vld4lane: {
3143 static const uint16_t DOpcodes[] = { ARM::VLD4LNd8Pseudo,
3144 ARM::VLD4LNd16Pseudo,
3145 ARM::VLD4LNd32Pseudo };
3146 static const uint16_t QOpcodes[] = { ARM::VLD4LNq16Pseudo,
3147 ARM::VLD4LNq32Pseudo };
3148 return SelectVLDSTLane(N, true, false, 4, DOpcodes, QOpcodes);
3151 case Intrinsic::arm_neon_vst1: {
3152 static const uint16_t DOpcodes[] = { ARM::VST1d8, ARM::VST1d16,
3153 ARM::VST1d32, ARM::VST1d64 };
3154 static const uint16_t QOpcodes[] = { ARM::VST1q8, ARM::VST1q16,
3155 ARM::VST1q32, ARM::VST1q64 };
3156 return SelectVST(N, false, 1, DOpcodes, QOpcodes, nullptr);
3159 case Intrinsic::arm_neon_vst2: {
3160 static const uint16_t DOpcodes[] = { ARM::VST2d8, ARM::VST2d16,
3161 ARM::VST2d32, ARM::VST1q64 };
3162 static const uint16_t QOpcodes[] = { ARM::VST2q8Pseudo, ARM::VST2q16Pseudo,
3163 ARM::VST2q32Pseudo };
3164 return SelectVST(N, false, 2, DOpcodes, QOpcodes, nullptr);
3167 case Intrinsic::arm_neon_vst3: {
3168 static const uint16_t DOpcodes[] = { ARM::VST3d8Pseudo,
3171 ARM::VST1d64TPseudo };
3172 static const uint16_t QOpcodes0[] = { ARM::VST3q8Pseudo_UPD,
3173 ARM::VST3q16Pseudo_UPD,
3174 ARM::VST3q32Pseudo_UPD };
3175 static const uint16_t QOpcodes1[] = { ARM::VST3q8oddPseudo,
3176 ARM::VST3q16oddPseudo,
3177 ARM::VST3q32oddPseudo };
3178 return SelectVST(N, false, 3, DOpcodes, QOpcodes0, QOpcodes1);
3181 case Intrinsic::arm_neon_vst4: {
3182 static const uint16_t DOpcodes[] = { ARM::VST4d8Pseudo,
3185 ARM::VST1d64QPseudo };
3186 static const uint16_t QOpcodes0[] = { ARM::VST4q8Pseudo_UPD,
3187 ARM::VST4q16Pseudo_UPD,
3188 ARM::VST4q32Pseudo_UPD };
3189 static const uint16_t QOpcodes1[] = { ARM::VST4q8oddPseudo,
3190 ARM::VST4q16oddPseudo,
3191 ARM::VST4q32oddPseudo };
3192 return SelectVST(N, false, 4, DOpcodes, QOpcodes0, QOpcodes1);
3195 case Intrinsic::arm_neon_vst2lane: {
3196 static const uint16_t DOpcodes[] = { ARM::VST2LNd8Pseudo,
3197 ARM::VST2LNd16Pseudo,
3198 ARM::VST2LNd32Pseudo };
3199 static const uint16_t QOpcodes[] = { ARM::VST2LNq16Pseudo,
3200 ARM::VST2LNq32Pseudo };
3201 return SelectVLDSTLane(N, false, false, 2, DOpcodes, QOpcodes);
3204 case Intrinsic::arm_neon_vst3lane: {
3205 static const uint16_t DOpcodes[] = { ARM::VST3LNd8Pseudo,
3206 ARM::VST3LNd16Pseudo,
3207 ARM::VST3LNd32Pseudo };
3208 static const uint16_t QOpcodes[] = { ARM::VST3LNq16Pseudo,
3209 ARM::VST3LNq32Pseudo };
3210 return SelectVLDSTLane(N, false, false, 3, DOpcodes, QOpcodes);
3213 case Intrinsic::arm_neon_vst4lane: {
3214 static const uint16_t DOpcodes[] = { ARM::VST4LNd8Pseudo,
3215 ARM::VST4LNd16Pseudo,
3216 ARM::VST4LNd32Pseudo };
3217 static const uint16_t QOpcodes[] = { ARM::VST4LNq16Pseudo,
3218 ARM::VST4LNq32Pseudo };
3219 return SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes);
3225 case ISD::INTRINSIC_WO_CHAIN: {
3226 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3231 case Intrinsic::arm_neon_vtbl2:
3232 return SelectVTBL(N, false, 2, ARM::VTBL2);
3233 case Intrinsic::arm_neon_vtbl3:
3234 return SelectVTBL(N, false, 3, ARM::VTBL3Pseudo);
3235 case Intrinsic::arm_neon_vtbl4:
3236 return SelectVTBL(N, false, 4, ARM::VTBL4Pseudo);
3238 case Intrinsic::arm_neon_vtbx2:
3239 return SelectVTBL(N, true, 2, ARM::VTBX2);
3240 case Intrinsic::arm_neon_vtbx3:
3241 return SelectVTBL(N, true, 3, ARM::VTBX3Pseudo);
3242 case Intrinsic::arm_neon_vtbx4:
3243 return SelectVTBL(N, true, 4, ARM::VTBX4Pseudo);
3248 case ARMISD::VTBL1: {
3250 EVT VT = N->getValueType(0);
3251 SmallVector<SDValue, 6> Ops;
3253 Ops.push_back(N->getOperand(0));
3254 Ops.push_back(N->getOperand(1));
3255 Ops.push_back(getAL(CurDAG, dl)); // Predicate
3256 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3257 return CurDAG->getMachineNode(ARM::VTBL1, dl, VT, Ops);
3259 case ARMISD::VTBL2: {
3261 EVT VT = N->getValueType(0);
3263 // Form a REG_SEQUENCE to force register allocation.
3264 SDValue V0 = N->getOperand(0);
3265 SDValue V1 = N->getOperand(1);
3266 SDValue RegSeq = SDValue(createDRegPairNode(MVT::v16i8, V0, V1), 0);
3268 SmallVector<SDValue, 6> Ops;
3269 Ops.push_back(RegSeq);
3270 Ops.push_back(N->getOperand(2));
3271 Ops.push_back(getAL(CurDAG, dl)); // Predicate
3272 Ops.push_back(CurDAG->getRegister(0, MVT::i32)); // Predicate Register
3273 return CurDAG->getMachineNode(ARM::VTBL2, dl, VT, Ops);
3276 case ISD::CONCAT_VECTORS:
3277 return SelectConcatVector(N);
3280 return SelectCode(N);
3283 // Inspect a register string of the form
3284 // cp<coprocessor>:<opc1>:c<CRn>:c<CRm>:<opc2> (32bit) or
3285 // cp<coprocessor>:<opc1>:c<CRm> (64bit), extract the integer operands from
3286 // its fields, and add these operands to the provided vector.
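// For example, the string "cp15:0:c13:c0:3" yields the operands {15, 0, 13, 0, 3}.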
3288 static void getIntOperandsFromRegisterString(StringRef RegString,
3289 SelectionDAG *CurDAG, SDLoc DL,
3290 std::vector<SDValue>& Ops) {
3291 SmallVector<StringRef, 5> Fields;
3292 RegString.split(Fields, ":");
3294 if (Fields.size() > 1) {
3295 bool AllIntFields = true;
3297 for (StringRef Field : Fields) {
3298 // Need to trim out leading 'cp' characters and get the integer field.
3300 AllIntFields &= !Field.trim("CPcp").getAsInteger(10, IntField);
3301 Ops.push_back(CurDAG->getTargetConstant(IntField, DL, MVT::i32));
3304 assert(AllIntFields &&
3305 "Unexpected non-integer value in special register string.");
3309 // Maps a Banked Register string to its mask value. The mask value returned is
3310 // for use in the MRSbanked / MSRbanked instruction nodes as the Banked Register
3311 // mask operand, which expresses which register is to be used, e.g. r8, and in
3312 // which mode it is to be used, e.g. usr. Returns -1 if the string was invalid.
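// For example, "lr_fiq" maps to 0x0e: the LR register as seen in FIQ mode.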
3314 static inline int getBankedRegisterMask(StringRef RegString) {
3315 return StringSwitch<int>(RegString.lower())
3316 .Case("r8_usr", 0x00)
3317 .Case("r9_usr", 0x01)
3318 .Case("r10_usr", 0x02)
3319 .Case("r11_usr", 0x03)
3320 .Case("r12_usr", 0x04)
3321 .Case("sp_usr", 0x05)
3322 .Case("lr_usr", 0x06)
3323 .Case("r8_fiq", 0x08)
3324 .Case("r9_fiq", 0x09)
3325 .Case("r10_fiq", 0x0a)
3326 .Case("r11_fiq", 0x0b)
3327 .Case("r12_fiq", 0x0c)
3328 .Case("sp_fiq", 0x0d)
3329 .Case("lr_fiq", 0x0e)
3330 .Case("lr_irq", 0x10)
3331 .Case("sp_irq", 0x11)
3332 .Case("lr_svc", 0x12)
3333 .Case("sp_svc", 0x13)
3334 .Case("lr_abt", 0x14)
3335 .Case("sp_abt", 0x15)
3336 .Case("lr_und", 0x16)
3337 .Case("sp_und", 0x17)
3338 .Case("lr_mon", 0x1c)
3339 .Case("sp_mon", 0x1d)
3340 .Case("elr_hyp", 0x1e)
3341 .Case("sp_hyp", 0x1f)
3342 .Case("spsr_fiq", 0x2e)
3343 .Case("spsr_irq", 0x30)
3344 .Case("spsr_svc", 0x32)
3345 .Case("spsr_abt", 0x34)
3346 .Case("spsr_und", 0x36)
3347 .Case("spsr_mon", 0x3c)
3348 .Case("spsr_hyp", 0x3e)
3352 // Maps a MClass special register string to its value for use in the
3353 // t2MRS_M / t2MSR_M instruction nodes as the SYSm value operand.
3354 // Returns -1 to signify that the string was invalid.
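// For example, "basepri" maps to SYSm value 0x11 and "control" to 0x14.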
3355 static inline int getMClassRegisterSYSmValueMask(StringRef RegString) {
3356 return StringSwitch<int>(RegString.lower())
3366 .Case("primask", 0x10)
3367 .Case("basepri", 0x11)
3368 .Case("basepri_max", 0x12)
3369 .Case("faultmask", 0x13)
3370 .Case("control", 0x14)
3374 // The flags here are common to those allowed for apsr in the A class cores and
3375 // those allowed for the special registers in the M class cores. Returns a
3376 // value representing which flags were present, -1 if invalid.
3377 static inline int getMClassFlagsMask(StringRef Flags, bool hasThumb2DSP) {
3378 if (Flags.empty())
3379 return 0x2 | (int)hasThumb2DSP;
3381 return StringSwitch<int>(Flags)
3384 .Case("nzcvqg", 0x3)
3388 static int getMClassRegisterMask(StringRef Reg, StringRef Flags, bool IsRead,
3389 const ARMSubtarget *Subtarget) {
3390 // Ensure that the register (without flags) was a valid M Class special register.
3392 int SYSmvalue = getMClassRegisterSYSmValueMask(Reg);
3393 if (SYSmvalue == -1)
3396 // basepri, basepri_max and faultmask are only valid for V7m.
3397 if (!Subtarget->hasV7Ops() && SYSmvalue >= 0x11 && SYSmvalue <= 0x13)
3400 // If it was a read then we won't be expecting flags and so at this point
3401 // we can return the mask.
3403 assert (Flags.empty() && "Unexpected flags for reading M class register.");
3407 // We know we are now handling a write, so we need to get the mask for the flags.
3408 int Mask = getMClassFlagsMask(Flags, Subtarget->hasThumb2DSP());
3410 // Only apsr, iapsr, eapsr, xpsr can have flags. The other register values
3411 // shouldn't have flags present.
3412 if ((SYSmvalue < 0x4 && Mask == -1) || (SYSmvalue > 0x4 && !Flags.empty()))
3415 // The _g and _nzcvqg versions are only valid if the DSP extension is available.
3417 if (!Subtarget->hasThumb2DSP() && (Mask & 0x1))
3420 // The register was valid, so we need to put the mask in the correct place
3421 // (the flags need to be in bits 11-10) and combine it with the SYSmvalue to
3422 // construct the operand for the instruction node.
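// e.g. for "apsr_nzcvqg" the flag mask 0x3 is shifted into bits 11-10 and
// OR'd with the apsr SYSm value.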
3423 if (SYSmvalue < 0x4)
3424 return SYSmvalue | Mask << 10;
3429 static int getARClassRegisterMask(StringRef Reg, StringRef Flags) {
3430 // The mask operand contains the special register (R Bit) in bit 4, whether
3431 // the register is spsr (R bit is 1) or one of cpsr/apsr (R bit is 0), and
3432 // bits 3-0 contain the fields to be accessed in the special register, set by
3433 // the flags provided with the register.
3435 if (Reg == "apsr") {
3436 // The flags permitted for apsr are the same flags that are allowed in
3437 // M class registers. We get the flag value and then shift the flags into
3438 // the correct place to combine with the mask.
3439 Mask = getMClassFlagsMask(Flags, true);
3445 if (Reg != "cpsr" && Reg != "spsr") {
3449 // This is the same as if the flags were "fc"
3450 if (Flags.empty() || Flags == "all")
3453 // Inspect the supplied flags string and set the bits in the mask for
3454 // the relevant and valid flags allowed for cpsr and spsr.
3455 for (char Flag : Flags) {
3474 // This avoids allowing strings where the same flag bit appears twice.
3475 if (!FlagVal || (Mask & FlagVal))
3480 // If the register is spsr then we need to set the R bit.
3487 // Lower the read_register intrinsic to ARM specific DAG nodes
3488 // using the supplied metadata string to select the instruction node to use
3489 // and the registers/masks to construct as operands for the node.
3490 SDNode *ARMDAGToDAGISel::SelectReadRegister(SDNode *N){
3491 const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
3492 const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
3493 bool IsThumb2 = Subtarget->isThumb2();
3496 std::vector<SDValue> Ops;
3497 getIntOperandsFromRegisterString(RegString->getString(), CurDAG, DL, Ops);
3500 // If the special register string was constructed of fields (as defined
3501 // in the ACLE) then we need to lower to an MRC node (32 bit) or an
3502 // MRRC node (64 bit); we can make the distinction based on the number of
3503 // operands we have.
3505 SmallVector<EVT, 3> ResTypes;
3506 if (Ops.size() == 5){
3507 Opcode = IsThumb2 ? ARM::t2MRC : ARM::MRC;
3508 ResTypes.append({ MVT::i32, MVT::Other });
3510 assert(Ops.size() == 3 &&
3511 "Invalid number of fields in special register string.");
3512 Opcode = IsThumb2 ? ARM::t2MRRC : ARM::MRRC;
3513 ResTypes.append({ MVT::i32, MVT::i32, MVT::Other });
3516 Ops.push_back(getAL(CurDAG, DL));
3517 Ops.push_back(CurDAG->getRegister(0, MVT::i32));
3518 Ops.push_back(N->getOperand(0));
3519 return CurDAG->getMachineNode(Opcode, DL, ResTypes, Ops);
3522 std::string SpecialReg = RegString->getString().lower();
3524 int BankedReg = getBankedRegisterMask(SpecialReg);
3525 if (BankedReg != -1) {
3526 Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32),
3527 getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
3529 return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSbanked : ARM::MRSbanked,
3530 DL, MVT::i32, MVT::Other, Ops);
3533 // The VFP registers are read by creating SelectionDAG nodes with opcodes
3534 // corresponding to the register that is being read from. So we switch on the
3535 // string to find which opcode we need to use.
3536 unsigned Opcode = StringSwitch<unsigned>(SpecialReg)
3537 .Case("fpscr", ARM::VMRS)
3538 .Case("fpexc", ARM::VMRS_FPEXC)
3539 .Case("fpsid", ARM::VMRS_FPSID)
3540 .Case("mvfr0", ARM::VMRS_MVFR0)
3541 .Case("mvfr1", ARM::VMRS_MVFR1)
3542 .Case("mvfr2", ARM::VMRS_MVFR2)
3543 .Case("fpinst", ARM::VMRS_FPINST)
3544 .Case("fpinst2", ARM::VMRS_FPINST2)
3547 // If an opcode was found then we can lower the read to a VFP instruction.
3549 if (!Subtarget->hasVFP2())
3551 if (Opcode == ARM::VMRS_MVFR2 && !Subtarget->hasFPARMv8())
3554 Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
3556 return CurDAG->getMachineNode(Opcode, DL, MVT::i32, MVT::Other, Ops);
  // If the target is M Class then we need to validate that the register
  // string is an acceptable value, so check that a mask can be constructed
  // from the string.
  if (Subtarget->isMClass()) {
    int SYSmValue = getMClassRegisterMask(SpecialReg, "", true, Subtarget);
    if (SYSmValue == -1)
      return nullptr;

    SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
                      getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
                      N->getOperand(0) };
    return CurDAG->getMachineNode(ARM::t2MRS_M, DL, MVT::i32, MVT::Other, Ops);
  }
  // Here we know the target is not M Class so we need to check if it is one
  // of the remaining possible values which are apsr, cpsr or spsr.
  if (SpecialReg == "apsr" || SpecialReg == "cpsr") {
    Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRS_AR : ARM::MRS, DL,
                                  MVT::i32, MVT::Other, Ops);
  }

  if (SpecialReg == "spsr") {
    Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MRSsys_AR : ARM::MRSsys,
                                  DL, MVT::i32, MVT::Other, Ops);
  }

  return nullptr;
}
// Lower the write_register intrinsic to ARM specific DAG nodes
// using the supplied metadata string to select the instruction node to use
// and the registers/masks to use in the nodes.
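// For example (illustrative, not from the original source):
//   call void @llvm.write_register.i32(metadata !0, i32 %x)
//   !0 = !{!"cp15:0:c13:c0:3"}
// is lowered to an MCR node; a plain register name such as "fpscr" or
// "basepri" instead takes one of the VMSR/MSR paths below.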
SDNode *ARMDAGToDAGISel::SelectWriteRegister(SDNode *N) {
  const MDNodeSDNode *MD = dyn_cast<MDNodeSDNode>(N->getOperand(1));
  const MDString *RegString = dyn_cast<MDString>(MD->getMD()->getOperand(0));
  bool IsThumb2 = Subtarget->isThumb2();
  SDLoc DL(N);

  std::vector<SDValue> Ops;
  getIntOperandsFromRegisterString(RegString->getString(), CurDAG, DL, Ops);
  if (!Ops.empty()) {
    // If the special register string was constructed of fields (as defined
    // in the ACLE) then we need to lower to an MCR node (32 bit) or an
    // MCRR node (64 bit); we can make the distinction based on the number
    // of operands we have.
    unsigned Opcode;
    if (Ops.size() == 5) {
      Opcode = IsThumb2 ? ARM::t2MCR : ARM::MCR;
      Ops.insert(Ops.begin()+2, N->getOperand(2));
    } else {
      assert(Ops.size() == 3 &&
             "Invalid number of fields in special register string.");
      Opcode = IsThumb2 ? ARM::t2MCRR : ARM::MCRR;
      SDValue WriteValue[] = { N->getOperand(2), N->getOperand(3) };
      Ops.insert(Ops.begin()+2, WriteValue, WriteValue+2);
    }

    Ops.push_back(getAL(CurDAG, DL));
    Ops.push_back(CurDAG->getRegister(0, MVT::i32));
    Ops.push_back(N->getOperand(0));

    return CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops);
  }
  std::string SpecialReg = RegString->getString().lower();
  int BankedReg = getBankedRegisterMask(SpecialReg);
  if (BankedReg != -1) {
    Ops = { CurDAG->getTargetConstant(BankedReg, DL, MVT::i32),
            N->getOperand(2), getAL(CurDAG, DL),
            CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
    return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSRbanked : ARM::MSRbanked,
                                  DL, MVT::Other, Ops);
  }
  // The VFP registers are written to by creating SelectionDAG nodes with
  // opcodes corresponding to the register that is being written. So we switch
  // on the string to find which opcode we need to use.
  unsigned Opcode = StringSwitch<unsigned>(SpecialReg)
                        .Case("fpscr", ARM::VMSR)
                        .Case("fpexc", ARM::VMSR_FPEXC)
                        .Case("fpsid", ARM::VMSR_FPSID)
                        .Case("fpinst", ARM::VMSR_FPINST)
                        .Case("fpinst2", ARM::VMSR_FPINST2)
                        .Default(0);

  if (Opcode) {
    if (!Subtarget->hasVFP2())
      return nullptr;
    Ops = { N->getOperand(2), getAL(CurDAG, DL),
            CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
    return CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops);
  }
  SmallVector<StringRef, 5> Fields;
  StringRef(SpecialReg).split(Fields, "_", 1, false);
  std::string Reg = Fields[0].str();
  StringRef Flags = Fields.size() == 2 ? Fields[1] : "";
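  // For example, "spsr_fc" splits into Reg == "spsr" and Flags == "fc",
  // while a bare "cpsr" leaves Flags empty.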
  // If the target was M Class then we need to validate the special register
  // value and retrieve the mask for use in the instruction node.
  if (Subtarget->isMClass()) {
    // basepri_max gets split so we need to correct Reg and Flags.
    if (SpecialReg == "basepri_max") {
      Reg = SpecialReg;
      Flags = "";
    }

    int SYSmValue = getMClassRegisterMask(Reg, Flags, false, Subtarget);
    if (SYSmValue == -1)
      return nullptr;

    SDValue Ops[] = { CurDAG->getTargetConstant(SYSmValue, DL, MVT::i32),
                      N->getOperand(2), getAL(CurDAG, DL),
                      CurDAG->getRegister(0, MVT::i32), N->getOperand(0) };
    return CurDAG->getMachineNode(ARM::t2MSR_M, DL, MVT::Other, Ops);
  }
  // We then check to see if a valid mask can be constructed for one of the
  // register string values permitted for the A and R class cores. These
  // values are apsr, spsr and cpsr; these are also valid on older cores.
  int Mask = getARClassRegisterMask(Reg, Flags);
  if (Mask != -1) {
    Ops = { CurDAG->getTargetConstant(Mask, DL, MVT::i32), N->getOperand(2),
            getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32),
            N->getOperand(0) };
    return CurDAG->getMachineNode(IsThumb2 ? ARM::t2MSR_AR : ARM::MSR,
                                  DL, MVT::Other, Ops);
  }

  return nullptr;
}
SDNode *ARMDAGToDAGISel::SelectInlineAsm(SDNode *N) {
  std::vector<SDValue> AsmNodeOperands;
  unsigned Flag, Kind;
  bool Changed = false;
  unsigned NumOps = N->getNumOperands();
  // Normally, i64 data is bound to two arbitrary GPRs for the "%r" constraint.
  // However, some instructions (e.g. ldrexd/strexd in ARM mode) require
  // (even/even+1) GPRs and use %n and %Hn to refer to the individual regs
  // respectively. Since there is no constraint to explicitly specify a
  // reg pair, we use the GPRPair reg class for "%r" for 64-bit data. For
  // Thumb, the 64-bit data may be referred to by H, Q, R modifiers, so we
  // still pack them into a GPRPair.
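  // For example (illustrative, not from the original source):
  //   asm volatile("ldrexd %0, %H0, [%1]" : "=&r"(Val64) : "r"(Addr));
  // requires %0/%H0 to be an even/odd GPR pair, which the rewrite below
  // arranges by switching the operands to the GPRPair register class.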
  SDLoc dl(N);
  SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
                                   : SDValue(nullptr, 0);

  SmallVector<bool, 8> OpChanged;
  // Glue node will be appended late.
  for (unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e;
       ++i) {
    SDValue op = N->getOperand(i);
    AsmNodeOperands.push_back(op);

    if (i < InlineAsm::Op_FirstOperand)
      continue;

    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
      Flag = C->getZExtValue();
      Kind = InlineAsm::getKind(Flag);
    }
    else
      continue;
    // Immediate operands to inline asm in the SelectionDAG are modeled with
    // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
    // the second is a constant with the value of the immediate. If we get here
    // and we have a Kind_Imm, skip the next operand, and continue.
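    // (Illustrative example, not from the original source: an "i"(42)
    // operand appears here as a Kind_Imm flag word followed by a constant
    // node holding 42.)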
    if (Kind == InlineAsm::Kind_Imm) {
      SDValue op = N->getOperand(++i);
      AsmNodeOperands.push_back(op);
      continue;
    }

    unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
    if (NumRegs)
      OpChanged.push_back(false);

    unsigned DefIdx = 0;
    bool IsTiedToChangedOp = false;
    // If it's a use that is tied with a previous def, it has no
    // reg class constraint.
    if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
      IsTiedToChangedOp = OpChanged[DefIdx];

    if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
        && Kind != InlineAsm::Kind_RegDefEarlyClobber)
      continue;

    unsigned RC;
    bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
    if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID))
        || NumRegs != 2)
      continue;

    assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
    SDValue V0 = N->getOperand(i+1);
    SDValue V1 = N->getOperand(i+2);
    unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
    unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
    SDValue PairedReg;
    MachineRegisterInfo &MRI = MF->getRegInfo();
    if (Kind == InlineAsm::Kind_RegDef ||
        Kind == InlineAsm::Kind_RegDefEarlyClobber) {
      // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
      // the original GPRs.

      unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
      PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
      SDValue Chain = SDValue(N, 0);

      SDNode *GU = N->getGluedUser();
      SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::Untyped,
                                               Chain.getValue(1));

      // Extract values from a GPRPair reg and copy to the original GPR reg.
      SDValue Sub0 = CurDAG->getTargetExtractSubreg(ARM::gsub_0, dl, MVT::i32,
                                                    RegCopy);
      SDValue Sub1 = CurDAG->getTargetExtractSubreg(ARM::gsub_1, dl, MVT::i32,
                                                    RegCopy);
      SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
                                        RegCopy.getValue(1));
      SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));

      // Update the original glue user.
      std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
      Ops.push_back(T1.getValue(1));
      CurDAG->UpdateNodeOperands(GU, Ops);
    }
    else {
      // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
      // GPRPair and then pass the GPRPair to the inline asm.
      SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];

      // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
      SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
                                          Chain.getValue(1));
      SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
                                          T0.getValue(1));
      SDValue Pair = SDValue(createGPRPairNode(MVT::Untyped, T0, T1), 0);

      // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
      // i32 VRs of inline asm with it.
      unsigned GPVR = MRI.createVirtualRegister(&ARM::GPRPairRegClass);
      PairedReg = CurDAG->getRegister(GPVR, MVT::Untyped);
      Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));

      AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
      Glue = Chain.getValue(1);
    }

    Changed = true;
    if (PairedReg.getNode()) {
      OpChanged[OpChanged.size() - 1] = true;
      Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum */);
      if (IsTiedToChangedOp)
        Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
      else
        Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID);
      // Replace the current flag.
      AsmNodeOperands[AsmNodeOperands.size() - 1] = CurDAG->getTargetConstant(
          Flag, dl, MVT::i32);
      // Add the new register node and skip the original two GPRs.
      AsmNodeOperands.push_back(PairedReg);
      // Skip the next two GPRs.
      i += 2;
    }
  }

  if (Glue.getNode())
    AsmNodeOperands.push_back(Glue);
  if (!Changed)
    return nullptr;
  SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
      CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
  New->setNodeId(-1);
  return New.getNode();
}
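// For example (illustrative, not from the original source), a "Q"(*Ptr)
// memory operand in inline asm is handled by SelectInlineAsmMemoryOperand
// below, which simply requires the address to already be in a register.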
bool ARMDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                             std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
    // FIXME: It seems strange that 'i' is needed here since it's supposed to
    //        be an immediate and not a memory constraint.
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_Q:
  case InlineAsm::Constraint_Um:
  case InlineAsm::Constraint_Un:
  case InlineAsm::Constraint_Uq:
  case InlineAsm::Constraint_Us:
  case InlineAsm::Constraint_Ut:
  case InlineAsm::Constraint_Uv:
  case InlineAsm::Constraint_Uy:
    // Require the address to be in a register. That is safe for all ARM
    // variants and it is hard to do anything much smarter without knowing
    // how the operand is used.
    OutOps.push_back(Op);
    return false;
  }
  return true;
}
/// createARMISelDag - This pass converts a legalized DAG into an
/// ARM-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new ARMDAGToDAGISel(TM, OptLevel);
}