1 //===-- ARMISelDAGToDAG.cpp - A dag to dag inst selector for ARM ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an instruction selector for the ARM target.
12 //===----------------------------------------------------------------------===//
15 #include "ARMAddressingModes.h"
16 #include "ARMConstantPoolValue.h"
17 #include "ARMISelLowering.h"
18 #include "ARMTargetMachine.h"
19 #include "llvm/CallingConv.h"
20 #include "llvm/Constants.h"
21 #include "llvm/DerivedTypes.h"
22 #include "llvm/Function.h"
23 #include "llvm/Intrinsics.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/SelectionDAGISel.h"
29 #include "llvm/Target/TargetLowering.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/Support/Compiler.h"
32 #include "llvm/Support/Debug.h"
// Hard-coded subregister indices for the two D (double-word) halves of a
// larger ARM register.
// NOTE(review): magic values 5/6 — presumably must stay in sync with the
// subregister indices generated from the register description (.td) file;
// confirm before changing.
35 static const unsigned arm_dsubreg_0 = 5;
36 static const unsigned arm_dsubreg_1 = 6;
38 //===--------------------------------------------------------------------===//
39 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
40 /// instructions for SelectionDAG operations.
/// ARMDAGToDAGISel - SelectionDAG instruction selector for the ARM target.
/// Declares the Select entry point plus one Select*AddrMode* helper per ARM /
/// Thumb / Thumb-2 addressing mode; each helper pattern-matches an address
/// SDValue and fills its by-reference out-parameters on success.
/// NOTE(review): this view of the source is sampled — several declaration
/// lines (access specifiers, some parameter lists, the closing brace) are
/// elided here.
43 class ARMDAGToDAGISel : public SelectionDAGISel {
44 ARMBaseTargetMachine &TM;
46 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
47 /// make the right decision when generating code for different targets.
48 const ARMSubtarget *Subtarget;
51 explicit ARMDAGToDAGISel(ARMBaseTargetMachine &tm)
52 : SelectionDAGISel(tm), TM(tm),
53 Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
56 virtual const char *getPassName() const {
57 return "ARM Instruction Selection";
60 /// getI32Imm - Return a target constant with the specified value, of type i32.
61 inline SDValue getI32Imm(unsigned Imm) {
62 return CurDAG->getTargetConstant(Imm, MVT::i32);
65 SDNode *Select(SDValue Op);
66 virtual void InstructionSelect();
67 bool SelectShifterOperandReg(SDValue Op, SDValue N, SDValue &A,
68 SDValue &B, SDValue &C);
69 bool SelectAddrMode2(SDValue Op, SDValue N, SDValue &Base,
70 SDValue &Offset, SDValue &Opc);
71 bool SelectAddrMode2Offset(SDValue Op, SDValue N,
72 SDValue &Offset, SDValue &Opc);
73 bool SelectAddrMode3(SDValue Op, SDValue N, SDValue &Base,
74 SDValue &Offset, SDValue &Opc);
75 bool SelectAddrMode3Offset(SDValue Op, SDValue N,
76 SDValue &Offset, SDValue &Opc);
77 bool SelectAddrMode5(SDValue Op, SDValue N, SDValue &Base,
79 bool SelectAddrMode6(SDValue Op, SDValue N, SDValue &Addr, SDValue &Update,
82 bool SelectAddrModePC(SDValue Op, SDValue N, SDValue &Offset,
85 bool SelectThumbAddrModeRR(SDValue Op, SDValue N, SDValue &Base,
87 bool SelectThumbAddrModeRI5(SDValue Op, SDValue N, unsigned Scale,
88 SDValue &Base, SDValue &OffImm,
90 bool SelectThumbAddrModeS1(SDValue Op, SDValue N, SDValue &Base,
91 SDValue &OffImm, SDValue &Offset);
92 bool SelectThumbAddrModeS2(SDValue Op, SDValue N, SDValue &Base,
93 SDValue &OffImm, SDValue &Offset);
94 bool SelectThumbAddrModeS4(SDValue Op, SDValue N, SDValue &Base,
95 SDValue &OffImm, SDValue &Offset);
96 bool SelectThumbAddrModeSP(SDValue Op, SDValue N, SDValue &Base,
99 bool SelectT2ShifterOperandReg(SDValue Op, SDValue N,
100 SDValue &BaseReg, SDValue &Opc);
101 bool SelectT2AddrModeImm12(SDValue Op, SDValue N, SDValue &Base,
103 bool SelectT2AddrModeImm8(SDValue Op, SDValue N, SDValue &Base,
105 bool SelectT2AddrModeImm8Offset(SDValue Op, SDValue N,
107 bool SelectT2AddrModeImm8s4(SDValue Op, SDValue N, SDValue &Base,
109 bool SelectT2AddrModeSoReg(SDValue Op, SDValue N, SDValue &Base,
110 SDValue &OffReg, SDValue &ShImm);
112 // Include the pieces autogenerated from the target description.
113 #include "ARMGenDAGISel.inc"
116 /// SelectARMIndexedLoad - Indexed (pre/post inc/dec) load matching code for
118 SDNode *SelectARMIndexedLoad(SDValue Op);
119 SDNode *SelectT2IndexedLoad(SDValue Op);
122 /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
123 /// inline asm expressions.
124 virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
126 std::vector<SDValue> &OutOps);
/// InstructionSelect - Top-level selection driver; after selection it removes
/// any DAG nodes left dead. NOTE(review): interior lines (the actual
/// selection loop / SelectRoot call) are elided in this view.
130 void ARMDAGToDAGISel::InstructionSelect() {
134 CurDAG->RemoveDeadNodes();
/// SelectShifterOperandReg - Match a shifter operand: a value produced by a
/// shift node. Fills BaseReg (value being shifted), ShReg (register shift
/// amount, or reg0 when the amount is an immediate) and Opc (packed
/// shift-opcode + immediate via ARM_AM::getSORegOpc).
137 bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op,
142 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
144 // Don't match base register only case. That is matched to a separate
145 // lower complexity pattern with explicit register operand.
146 if (ShOpcVal == ARM_AM::no_shift) return false;
148 BaseReg = N.getOperand(0);
149 unsigned ShImmVal = 0;
// Constant shift amount: encode it in the immediate and use reg0 for ShReg.
150 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
151 ShReg = CurDAG->getRegister(0, MVT::i32);
152 ShImmVal = RHS->getZExtValue() & 31;
// Otherwise the shift amount comes from a register.
154 ShReg = N.getOperand(1);
156 Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
/// SelectAddrMode2 - Match an ARM addressing-mode-2 operand (word/byte
/// load/store): R +/- imm12, R +/- R, or R +/- (R shifted by constant).
/// Fills Base, Offset and the packed Opc (ARM_AM::getAM2Opc).
161 bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
162 SDValue &Base, SDValue &Offset,
164 if (N.getOpcode() == ISD::MUL) {
165 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
166 // X * [3,5,9] -> X + X * [2,4,8] etc.
167 int RHSC = (int)RHS->getZExtValue();
170 ARM_AM::AddrOpc AddSub = ARM_AM::add;
172 AddSub = ARM_AM::sub;
// A power-of-two multiplier becomes base + (base << log2(multiplier)).
175 if (isPowerOf2_32(RHSC)) {
176 unsigned ShAmt = Log2_32(RHSC);
177 Base = Offset = N.getOperand(0);
178 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
// Not an add/sub: treat the whole node as the base with a zero offset,
// unwrapping frame indices and ARMISD::Wrapper.
187 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
189 if (N.getOpcode() == ISD::FrameIndex) {
190 int FI = cast<FrameIndexSDNode>(N)->getIndex();
191 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
192 } else if (N.getOpcode() == ARMISD::Wrapper) {
193 Base = N.getOperand(0);
195 Offset = CurDAG->getRegister(0, MVT::i32);
196 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
202 // Match simple R +/- imm12 operands.
203 if (N.getOpcode() == ISD::ADD)
204 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
205 int RHSC = (int)RHS->getZExtValue();
206 if ((RHSC >= 0 && RHSC < 0x1000) ||
207 (RHSC < 0 && RHSC > -0x1000)) { // 12 bits.
208 Base = N.getOperand(0);
209 if (Base.getOpcode() == ISD::FrameIndex) {
210 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
211 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
213 Offset = CurDAG->getRegister(0, MVT::i32);
215 ARM_AM::AddrOpc AddSub = ARM_AM::add;
217 AddSub = ARM_AM::sub;
220 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
227 // Otherwise this is R +/- [possibly shifted] R
228 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
229 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
232 Base = N.getOperand(0);
233 Offset = N.getOperand(1);
235 if (ShOpcVal != ARM_AM::no_shift) {
236 // Check to see if the RHS of the shift is a constant, if not, we can't fold
238 if (ConstantSDNode *Sh =
239 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
240 ShAmt = Sh->getZExtValue();
241 Offset = N.getOperand(1).getOperand(0);
243 ShOpcVal = ARM_AM::no_shift;
247 // Try matching (R shl C) + (R).
248 if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
249 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
250 if (ShOpcVal != ARM_AM::no_shift) {
251 // Check to see if the RHS of the shift is a constant, if not, we can't
253 if (ConstantSDNode *Sh =
254 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
255 ShAmt = Sh->getZExtValue();
// Shift is on the LHS: swap roles so Base is the unshifted operand.
256 Offset = N.getOperand(0).getOperand(0);
257 Base = N.getOperand(1);
259 ShOpcVal = ARM_AM::no_shift;
264 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
/// SelectAddrMode2Offset - Match the offset operand of a pre/post-indexed
/// mode-2 load/store. Op is the indexed memory node (used only to read its
/// addressing mode and derive add vs. sub); N is the offset value.
269 bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDValue Op, SDValue N,
270 SDValue &Offset, SDValue &Opc) {
271 unsigned Opcode = Op.getOpcode();
272 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
273 ? cast<LoadSDNode>(Op)->getAddressingMode()
274 : cast<StoreSDNode>(Op)->getAddressingMode();
275 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
276 ? ARM_AM::add : ARM_AM::sub;
// Constant offset: must fit in 12 bits; register operand is reg0.
277 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
278 int Val = (int)C->getZExtValue();
279 if (Val >= 0 && Val < 0x1000) { // 12 bits.
280 Offset = CurDAG->getRegister(0, MVT::i32);
281 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
// Otherwise a (possibly shifted-by-constant) register offset.
289 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
291 if (ShOpcVal != ARM_AM::no_shift) {
292 // Check to see if the RHS of the shift is a constant, if not, we can't fold
294 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
295 ShAmt = Sh->getZExtValue();
296 Offset = N.getOperand(0);
298 ShOpcVal = ARM_AM::no_shift;
302 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
/// SelectAddrMode3 - Match an ARM addressing-mode-3 operand (halfword /
/// signed-byte load/store): R +/- R or R +/- imm8.
308 bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
309 SDValue &Base, SDValue &Offset,
311 if (N.getOpcode() == ISD::SUB) {
312 // X - C is canonicalize to X + -C, no need to handle it here.
313 Base = N.getOperand(0);
314 Offset = N.getOperand(1);
315 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
// Not an add: whole node is the base, zero offset (frame index unwrapped).
319 if (N.getOpcode() != ISD::ADD) {
321 if (N.getOpcode() == ISD::FrameIndex) {
322 int FI = cast<FrameIndexSDNode>(N)->getIndex();
323 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
325 Offset = CurDAG->getRegister(0, MVT::i32);
326 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
330 // If the RHS is +/- imm8, fold into addr mode.
331 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
332 int RHSC = (int)RHS->getZExtValue();
333 if ((RHSC >= 0 && RHSC < 256) ||
334 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
335 Base = N.getOperand(0);
336 if (Base.getOpcode() == ISD::FrameIndex) {
337 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
338 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
340 Offset = CurDAG->getRegister(0, MVT::i32);
342 ARM_AM::AddrOpc AddSub = ARM_AM::add;
344 AddSub = ARM_AM::sub;
347 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
// Fallback: plain register + register.
352 Base = N.getOperand(0);
353 Offset = N.getOperand(1);
354 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
/// SelectAddrMode3Offset - Match the offset operand of a pre/post-indexed
/// mode-3 load/store: an imm8 (0..255) or a plain register, with add/sub
/// derived from the parent node's addressing mode.
358 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDValue Op, SDValue N,
359 SDValue &Offset, SDValue &Opc) {
360 unsigned Opcode = Op.getOpcode();
361 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
362 ? cast<LoadSDNode>(Op)->getAddressingMode()
363 : cast<StoreSDNode>(Op)->getAddressingMode();
364 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
365 ? ARM_AM::add : ARM_AM::sub;
366 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
367 int Val = (int)C->getZExtValue();
368 if (Val >= 0 && Val < 256) {
369 Offset = CurDAG->getRegister(0, MVT::i32);
370 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
// Register offset case.
376 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
/// SelectAddrMode5 - Match an addressing-mode-5 operand: base register plus
/// a +/- imm8 that is implicitly scaled by 4 (used by FP load/store).
381 bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
382 SDValue &Base, SDValue &Offset) {
// Not an add: whole node is the base (frame index / Wrapper unwrapped),
// offset encoded as add+0.
383 if (N.getOpcode() != ISD::ADD) {
385 if (N.getOpcode() == ISD::FrameIndex) {
386 int FI = cast<FrameIndexSDNode>(N)->getIndex();
387 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
388 } else if (N.getOpcode() == ARMISD::Wrapper) {
389 Base = N.getOperand(0);
391 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
396 // If the RHS is +/- imm8, fold into addr mode.
397 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
398 int RHSC = (int)RHS->getZExtValue();
399 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied by 4.
401 if ((RHSC >= 0 && RHSC < 256) ||
402 (RHSC < 0 && RHSC > -256)) { // note -256 itself isn't allowed.
403 Base = N.getOperand(0);
404 if (Base.getOpcode() == ISD::FrameIndex) {
405 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
406 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
409 ARM_AM::AddrOpc AddSub = ARM_AM::add;
411 AddSub = ARM_AM::sub;
414 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
// Fallback: no foldable immediate — encode add+0.
422 Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
/// SelectAddrMode6 - Match an addressing-mode-6 operand. Always succeeds in
/// the visible code path: Update is reg0 and the opcode encodes no writeback.
427 bool ARMDAGToDAGISel::SelectAddrMode6(SDValue Op, SDValue N,
428 SDValue &Addr, SDValue &Update,
431 // The optional writeback is handled in ARMLoadStoreOpt.
432 Update = CurDAG->getRegister(0, MVT::i32);
433 Opc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(false), MVT::i32);
/// SelectAddrModePC - Match a PC-relative address: a single-use
/// ARMISD::PIC_ADD node. Offset gets operand 0; Label gets the constant
/// label id from operand 1.
437 bool ARMDAGToDAGISel::SelectAddrModePC(SDValue Op, SDValue N,
438 SDValue &Offset, SDValue &Label) {
439 if (N.getOpcode() == ARMISD::PIC_ADD && N.hasOneUse()) {
440 Offset = N.getOperand(0);
441 SDValue N1 = N.getOperand(1);
442 Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
/// SelectThumbAddrModeRR - Match a Thumb register+register address. For a
/// non-ADD node the offset register must be a materialized zero (see comment
/// below for why a tMOVi8 node is emitted directly).
449 bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue Op, SDValue N,
450 SDValue &Base, SDValue &Offset){
451 // FIXME dl should come from the parent load or store, not the address
452 DebugLoc dl = Op.getDebugLoc();
453 if (N.getOpcode() != ISD::ADD) {
455 // We must materialize a zero in a reg! Returning a constant here
456 // wouldn't work without additional code to position the node within
457 // ISel's topological ordering in a place where ISel will process it
458 // normally. Instead, just explicitly issue a tMOVri8 node!
459 Offset = SDValue(CurDAG->getTargetNode(ARM::tMOVi8, dl, MVT::i32,
460 CurDAG->getTargetConstant(0, MVT::i32)), 0);
464 Base = N.getOperand(0);
465 Offset = N.getOperand(1);
/// SelectThumbAddrModeRI5 - Match a Thumb base + (imm5 * Scale) or base +
/// register address. Declines addresses better served by tLDRspi/tSTRspi
/// (SP-relative) or tLDRpci (constant pool) so those patterns win instead.
470 ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N,
471 unsigned Scale, SDValue &Base,
472 SDValue &OffImm, SDValue &Offset) {
474 SDValue TmpBase, TmpOffImm;
475 if (SelectThumbAddrModeSP(Op, N, TmpBase, TmpOffImm))
476 return false; // We want to select tLDRspi / tSTRspi instead.
477 if (N.getOpcode() == ARMISD::Wrapper &&
478 N.getOperand(0).getOpcode() == ISD::TargetConstantPool)
479 return false; // We want to select tLDRpci instead.
482 if (N.getOpcode() != ISD::ADD) {
483 Base = (N.getOpcode() == ARMISD::Wrapper) ? N.getOperand(0) : N;
484 Offset = CurDAG->getRegister(0, MVT::i32);
485 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
489 // Thumb does not have [sp, r] address mode.
490 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
491 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(N.getOperand(1));
492 if ((LHSR && LHSR->getReg() == ARM::SP) ||
493 (RHSR && RHSR->getReg() == ARM::SP)) {
495 Offset = CurDAG->getRegister(0, MVT::i32);
496 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
500 // If the RHS is + imm5 * scale, fold into addr mode.
501 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
502 int RHSC = (int)RHS->getZExtValue();
503 if ((RHSC & (Scale-1)) == 0) { // The constant is implicitly multiplied.
505 if (RHSC >= 0 && RHSC < 32) {
506 Base = N.getOperand(0);
507 Offset = CurDAG->getRegister(0, MVT::i32);
508 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
// Fallback: register + register, zero immediate.
514 Base = N.getOperand(0);
515 Offset = N.getOperand(1);
516 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
/// SelectThumbAddrModeS1 - Byte-sized access: RI5 with scale 1.
520 bool ARMDAGToDAGISel::SelectThumbAddrModeS1(SDValue Op, SDValue N,
521 SDValue &Base, SDValue &OffImm,
523 return SelectThumbAddrModeRI5(Op, N, 1, Base, OffImm, Offset);
/// SelectThumbAddrModeS2 - Halfword-sized access: RI5 with scale 2.
526 bool ARMDAGToDAGISel::SelectThumbAddrModeS2(SDValue Op, SDValue N,
527 SDValue &Base, SDValue &OffImm,
529 return SelectThumbAddrModeRI5(Op, N, 2, Base, OffImm, Offset);
/// SelectThumbAddrModeS4 - Word-sized access: RI5 with scale 4.
532 bool ARMDAGToDAGISel::SelectThumbAddrModeS4(SDValue Op, SDValue N,
533 SDValue &Base, SDValue &OffImm,
535 return SelectThumbAddrModeRI5(Op, N, 4, Base, OffImm, Offset);
/// SelectThumbAddrModeSP - Match an SP- or frame-index-relative Thumb
/// address: FI + 0, or (SP/FI) + imm8 implicitly scaled by 4.
538 bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N,
539 SDValue &Base, SDValue &OffImm) {
540 if (N.getOpcode() == ISD::FrameIndex) {
541 int FI = cast<FrameIndexSDNode>(N)->getIndex();
542 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
543 OffImm = CurDAG->getTargetConstant(0, MVT::i32);
547 if (N.getOpcode() != ISD::ADD)
// Only SP- or frame-index-based adds qualify.
550 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(N.getOperand(0));
551 if (N.getOperand(0).getOpcode() == ISD::FrameIndex ||
552 (LHSR && LHSR->getReg() == ARM::SP)) {
553 // If the RHS is + imm8 * scale, fold into addr mode.
554 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
555 int RHSC = (int)RHS->getZExtValue();
556 if ((RHSC & 3) == 0) { // The constant is implicitly multiplied.
558 if (RHSC >= 0 && RHSC < 256) {
559 Base = N.getOperand(0);
560 if (Base.getOpcode() == ISD::FrameIndex) {
561 int FI = cast<FrameIndexSDNode>(Base)->getIndex();
562 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
564 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
/// SelectT2ShifterOperandReg - Thumb-2 shifter operand: like
/// SelectShifterOperandReg but only a register shifted by a constant (no
/// register shift amount); Opc packs the shift kind + immediate.
574 bool ARMDAGToDAGISel::SelectT2ShifterOperandReg(SDValue Op, SDValue N,
577 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
579 // Don't match base register only case. That is matched to a separate
580 // lower complexity pattern with explicit register operand.
581 if (ShOpcVal == ARM_AM::no_shift) return false;
583 BaseReg = N.getOperand(0);
584 unsigned ShImmVal = 0;
585 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
586 ShImmVal = RHS->getZExtValue() & 31;
587 Opc = getI32Imm(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal));
/// SelectT2AddrModeImm12 - Thumb-2 base + unsigned imm12 (0..4095).
594 bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue Op, SDValue N,
595 SDValue &Base, SDValue &OffImm) {
596 // Match simple R + imm12 operands.
597 if (N.getOpcode() != ISD::ADD)
600 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
601 int RHSC = (int)RHS->getZExtValue();
602 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits.
603 Base = N.getOperand(0);
604 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
/// SelectT2AddrModeImm8 - Thumb-2 base + signed imm8. Only negative
/// immediates are folded from ADD (positive ones are covered by the imm12
/// form); a SUB with a positive imm8 is folded as its negation.
612 bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue Op, SDValue N,
613 SDValue &Base, SDValue &OffImm) {
614 if (N.getOpcode() == ISD::ADD) {
615 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
616 int RHSC = (int)RHS->getZExtValue();
617 if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
618 Base = N.getOperand(0);
619 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
623 } else if (N.getOpcode() == ISD::SUB) {
624 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
625 int RHSC = (int)RHS->getZExtValue();
626 if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
627 Base = N.getOperand(0);
628 OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
/// SelectT2AddrModeImm8Offset - Offset operand of a Thumb-2 pre/post-indexed
/// load/store: an imm8 (0..255), negated unless the parent is PRE_INC.
// NOTE(review): POST_INC also negates here — confirm the instruction
// encoding expects that, since only PRE_INC keeps the positive value.
637 bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDValue Op, SDValue N,
639 unsigned Opcode = Op.getOpcode();
640 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
641 ? cast<LoadSDNode>(Op)->getAddressingMode()
642 : cast<StoreSDNode>(Op)->getAddressingMode();
643 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N)) {
644 int RHSC = (int)RHS->getZExtValue();
645 if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
646 OffImm = (AM == ISD::PRE_INC)
647 ? CurDAG->getTargetConstant(RHSC, MVT::i32)
648 : CurDAG->getTargetConstant(-RHSC, MVT::i32);
/// SelectT2AddrModeImm8s4 - Thumb-2 base + signed imm8 scaled by 4
/// (multiple of 4, magnitude < 0x400). ADD folds negative constants,
/// SUB folds positive ones as their negation.
656 bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDValue Op, SDValue N,
657 SDValue &Base, SDValue &OffImm) {
658 if (N.getOpcode() == ISD::ADD) {
659 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
660 int RHSC = (int)RHS->getZExtValue();
661 if (((RHSC & 0x3) == 0) && (RHSC < 0 && RHSC > -0x400)) { // 8 bits.
662 Base = N.getOperand(0);
663 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
667 } else if (N.getOpcode() == ISD::SUB) {
668 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
669 int RHSC = (int)RHS->getZExtValue();
670 if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
671 Base = N.getOperand(0);
672 OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
/// SelectT2AddrModeSoReg - Thumb-2 register + shifted-register address:
/// (R + R) or (R + (R << [1,2,3])). Declines addresses that the imm12 /
/// imm8 forms can match, and constant pools (t2LDRpci is preferred).
681 bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
683 SDValue &OffReg, SDValue &ShImm) {
685 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
687 if (N.getOpcode() == ISD::FrameIndex) {
688 int FI = cast<FrameIndexSDNode>(N)->getIndex();
689 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
690 } else if (N.getOpcode() == ARMISD::Wrapper) {
691 Base = N.getOperand(0);
692 if (Base.getOpcode() == ISD::TargetConstantPool)
693 return false; // We want to select t2LDRpci instead.
695 OffReg = CurDAG->getRegister(0, MVT::i32);
696 ShImm = CurDAG->getTargetConstant(0, MVT::i32);
700 // Look for (R + R) or (R + (R << [1,2,3])).
702 Base = N.getOperand(0);
703 OffReg = N.getOperand(1);
705 // Swap if it is ((R << c) + R).
706 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(OffReg);
707 if (ShOpcVal != ARM_AM::lsl) {
708 ShOpcVal = ARM_AM::getShiftOpcForNode(Base);
709 if (ShOpcVal == ARM_AM::lsl)
710 std::swap(Base, OffReg);
713 if (ShOpcVal == ARM_AM::lsl) {
714 // Check to see if the RHS of the shift is a constant, if not, we can't fold
716 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(OffReg.getOperand(1))) {
717 ShAmt = Sh->getZExtValue();
720 ShOpcVal = ARM_AM::no_shift;
722 OffReg = OffReg.getOperand(0);
724 ShOpcVal = ARM_AM::no_shift;
726 } else if (SelectT2AddrModeImm12(Op, N, Base, ShImm) ||
727 SelectT2AddrModeImm8 (Op, N, Base, ShImm))
728 // Don't match if it's possible to match to one of the r +/- imm cases.
731 ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
736 //===--------------------------------------------------------------------===//
738 /// getAL - Returns a ARMCC::AL immediate node.
// AL is the "always" condition code, used to predicate unconditional ops.
739 static inline SDValue getAL(SelectionDAG *CurDAG) {
740 return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
/// SelectARMIndexedLoad - Select a pre/post-indexed ARM load. Returns the
/// selected machine node, or falls through (selection handled elsewhere /
/// lines elided) when the load is unindexed or no addressing mode matches.
/// i32 uses mode 2 (LDR), i16 mode 3 (LDRH/LDRSH), i8/i1 mode 3 for
/// sign-extending (LDRSB) and mode 2 otherwise (LDRB).
743 SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDValue Op) {
744 LoadSDNode *LD = cast<LoadSDNode>(Op);
745 ISD::MemIndexedMode AM = LD->getAddressingMode();
746 if (AM == ISD::UNINDEXED)
749 MVT LoadedVT = LD->getMemoryVT();
750 SDValue Offset, AMOpc;
751 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
754 if (LoadedVT == MVT::i32 &&
755 SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
756 Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
758 } else if (LoadedVT == MVT::i16 &&
759 SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
761 Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
762 ? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
763 : (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
764 } else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
765 if (LD->getExtensionType() == ISD::SEXTLOAD) {
766 if (SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
768 Opcode = isPre ? ARM::LDRSB_PRE : ARM::LDRSB_POST;
771 if (SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
773 Opcode = isPre ? ARM::LDRB_PRE : ARM::LDRB_POST;
// Build the machine node: base, offset, packed AM opcode, AL predicate,
// predicate register (reg0) and the chain.
779 SDValue Chain = LD->getChain();
780 SDValue Base = LD->getBasePtr();
781 SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
782 CurDAG->getRegister(0, MVT::i32), Chain };
783 return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), MVT::i32, MVT::i32,
/// SelectT2IndexedLoad - Thumb-2 counterpart of SelectARMIndexedLoad: select
/// a pre/post-indexed t2LDR* node, choosing the sign/zero-extending variant
/// from the loaded type and extension kind.
790 SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDValue Op) {
791 LoadSDNode *LD = cast<LoadSDNode>(Op);
792 ISD::MemIndexedMode AM = LD->getAddressingMode();
793 if (AM == ISD::UNINDEXED)
796 MVT LoadedVT = LD->getMemoryVT();
797 bool isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
799 bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
802 if (SelectT2AddrModeImm8Offset(Op, LD->getOffset(), Offset)) {
803 switch (LoadedVT.getSimpleVT()) {
805 Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
809 Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
811 Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
816 Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
818 Opcode = isPre ? ARM::t2LDRB_PRE : ARM::t2LDRB_POST;
// Operands: base, imm offset, AL predicate, predicate reg (reg0), chain.
827 SDValue Chain = LD->getChain();
828 SDValue Base = LD->getBasePtr();
829 SDValue Ops[]= { Base, Offset, getAL(CurDAG),
830 CurDAG->getRegister(0, MVT::i32), Chain };
831 return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), MVT::i32, MVT::i32,
839 SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
840 SDNode *N = Op.getNode();
841 DebugLoc dl = N->getDebugLoc();
843 if (N->isMachineOpcode())
844 return NULL; // Already selected.
846 switch (N->getOpcode()) {
848 case ISD::Constant: {
849 unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
851 if (Subtarget->isThumb()) {
852 if (Subtarget->hasThumb2())
853 // Thumb2 has the MOVT instruction, so all immediates can
854 // be done with MOV + MOVT, at worst.
857 UseCP = (Val > 255 && // MOV
858 ~Val > 255 && // MOV + MVN
859 !ARM_AM::isThumbImmShiftedVal(Val)); // MOV + LSL
861 UseCP = (ARM_AM::getSOImmVal(Val) == -1 && // MOV
862 ARM_AM::getSOImmVal(~Val) == -1 && // MVN
863 !ARM_AM::isSOImmTwoPartVal(Val)); // two instrs.
866 CurDAG->getTargetConstantPool(ConstantInt::get(Type::Int32Ty, Val),
870 if (Subtarget->isThumb())
871 ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
872 CPIdx, CurDAG->getEntryNode());
876 CurDAG->getRegister(0, MVT::i32),
877 CurDAG->getTargetConstant(0, MVT::i32),
879 CurDAG->getRegister(0, MVT::i32),
880 CurDAG->getEntryNode()
882 ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
885 ReplaceUses(Op, SDValue(ResNode, 0));
889 // Other cases are autogenerated.
892 case ISD::FrameIndex: {
893 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
894 int FI = cast<FrameIndexSDNode>(N)->getIndex();
895 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
896 if (Subtarget->isThumb()) {
897 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
898 CurDAG->getTargetConstant(0, MVT::i32));
900 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
901 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
902 CurDAG->getRegister(0, MVT::i32) };
903 return CurDAG->SelectNodeTo(N, ARM::ADDri, MVT::i32, Ops, 5);
907 if (!Subtarget->isThumb())
909 // Select add sp, c to tADDhirr.
910 SDValue N0 = Op.getOperand(0);
911 SDValue N1 = Op.getOperand(1);
912 RegisterSDNode *LHSR = dyn_cast<RegisterSDNode>(Op.getOperand(0));
913 RegisterSDNode *RHSR = dyn_cast<RegisterSDNode>(Op.getOperand(1));
914 if (LHSR && LHSR->getReg() == ARM::SP) {
916 std::swap(LHSR, RHSR);
918 if (RHSR && RHSR->getReg() == ARM::SP) {
919 SDValue Val = SDValue(CurDAG->getTargetNode(ARM::tMOVlor2hir, dl,
920 Op.getValueType(), N0, N0), 0);
921 return CurDAG->SelectNodeTo(N, ARM::tADDhirr, Op.getValueType(), Val, N1);
926 if (Subtarget->isThumb1Only())
928 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
929 unsigned RHSV = C->getZExtValue();
931 if (isPowerOf2_32(RHSV-1)) { // 2^n+1?
932 SDValue V = Op.getOperand(0);
933 unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV-1));
934 SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
935 CurDAG->getTargetConstant(ShImm, MVT::i32),
936 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
937 CurDAG->getRegister(0, MVT::i32) };
938 return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
940 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
941 SDValue V = Op.getOperand(0);
942 unsigned ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, Log2_32(RHSV+1));
943 SDValue Ops[] = { V, V, CurDAG->getRegister(0, MVT::i32),
944 CurDAG->getTargetConstant(ShImm, MVT::i32),
945 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
946 CurDAG->getRegister(0, MVT::i32) };
947 return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
952 return CurDAG->getTargetNode(ARM::FMRRD, dl, MVT::i32, MVT::i32,
953 Op.getOperand(0), getAL(CurDAG),
954 CurDAG->getRegister(0, MVT::i32));
955 case ISD::UMUL_LOHI: {
956 if (Subtarget->isThumb1Only())
958 if (Subtarget->isThumb()) {
959 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
960 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
961 CurDAG->getRegister(0, MVT::i32) };
962 return CurDAG->getTargetNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops,4);
964 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
965 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
966 CurDAG->getRegister(0, MVT::i32) };
967 return CurDAG->getTargetNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
970 case ISD::SMUL_LOHI: {
971 if (Subtarget->isThumb1Only())
973 if (Subtarget->isThumb()) {
974 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
975 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
976 return CurDAG->getTargetNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops,4);
978 SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
979 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
980 CurDAG->getRegister(0, MVT::i32) };
981 return CurDAG->getTargetNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
986 if (Subtarget->isThumb() && Subtarget->hasThumb2())
987 ResNode = SelectT2IndexedLoad(Op);
989 ResNode = SelectARMIndexedLoad(Op);
992 // Other cases are autogenerated.
995 case ARMISD::BRCOND: {
996 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
997 // Emits: (Bcc:void (bb:Other):$dst, (imm:i32):$cc)
998 // Pattern complexity = 6 cost = 1 size = 0
1000 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
1001 // Emits: (tBcc:void (bb:Other):$dst, (imm:i32):$cc)
1002 // Pattern complexity = 6 cost = 1 size = 0
1004 // Pattern: (ARMbrcond:void (bb:Other):$dst, (imm:i32):$cc)
1005 // Emits: (t2Bcc:void (bb:Other):$dst, (imm:i32):$cc)
1006 // Pattern complexity = 6 cost = 1 size = 0
1008 unsigned Opc = Subtarget->isThumb() ?
1009 ((Subtarget->hasThumb2()) ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
1010 SDValue Chain = Op.getOperand(0);
1011 SDValue N1 = Op.getOperand(1);
1012 SDValue N2 = Op.getOperand(2);
1013 SDValue N3 = Op.getOperand(3);
1014 SDValue InFlag = Op.getOperand(4);
1015 assert(N1.getOpcode() == ISD::BasicBlock);
1016 assert(N2.getOpcode() == ISD::Constant);
1017 assert(N3.getOpcode() == ISD::Register);
1019 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1020 cast<ConstantSDNode>(N2)->getZExtValue()),
1022 SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
1023 SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, MVT::Other,
1025 Chain = SDValue(ResNode, 0);
1026 if (Op.getNode()->getNumValues() == 2) {
1027 InFlag = SDValue(ResNode, 1);
1028 ReplaceUses(SDValue(Op.getNode(), 1), InFlag);
1030 ReplaceUses(SDValue(Op.getNode(), 0), SDValue(Chain.getNode(), Chain.getResNo()));
1033 case ARMISD::CMOV: {
1034 MVT VT = Op.getValueType();
1035 SDValue N0 = Op.getOperand(0);
1036 SDValue N1 = Op.getOperand(1);
1037 SDValue N2 = Op.getOperand(2);
1038 SDValue N3 = Op.getOperand(3);
1039 SDValue InFlag = Op.getOperand(4);
1040 assert(N2.getOpcode() == ISD::Constant);
1041 assert(N3.getOpcode() == ISD::Register);
1043 if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
1044 // Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
1045 // Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
1046 // Pattern complexity = 18 cost = 1 size = 0
1050 if (Subtarget->isThumb()) {
1051 if (SelectT2ShifterOperandReg(Op, N1, CPTmp0, CPTmp1)) {
1052 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1053 cast<ConstantSDNode>(N2)->getZExtValue()),
1055 SDValue Ops[] = { N0, CPTmp0, CPTmp1, Tmp2, N3, InFlag };
1056 return CurDAG->SelectNodeTo(Op.getNode(),
1057 ARM::t2MOVCCs, MVT::i32,Ops, 6);
1060 if (SelectShifterOperandReg(Op, N1, CPTmp0, CPTmp1, CPTmp2)) {
1061 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1062 cast<ConstantSDNode>(N2)->getZExtValue()),
1064 SDValue Ops[] = { N0, CPTmp0, CPTmp1, CPTmp2, Tmp2, N3, InFlag };
1065 return CurDAG->SelectNodeTo(Op.getNode(),
1066 ARM::MOVCCs, MVT::i32, Ops, 7);
1070 // Pattern: (ARMcmov:i32 GPR:i32:$false,
1071 // (imm:i32)<<P:Predicate_so_imm>><<X:so_imm_XFORM>>:$true,
1073 // Emits: (MOVCCi:i32 GPR:i32:$false,
1074 // (so_imm_XFORM:i32 (imm:i32):$true), (imm:i32):$cc)
1075 // Pattern complexity = 10 cost = 1 size = 0
1076 if (N3.getOpcode() == ISD::Constant) {
1077 if (Subtarget->isThumb()) {
1078 if (Predicate_t2_so_imm(N3.getNode())) {
1079 SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
1080 cast<ConstantSDNode>(N1)->getZExtValue()),
1082 Tmp1 = Transform_t2_so_imm_XFORM(Tmp1.getNode());
1083 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1084 cast<ConstantSDNode>(N2)->getZExtValue()),
1086 SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
1087 return CurDAG->SelectNodeTo(Op.getNode(),
1088 ARM::t2MOVCCi, MVT::i32, Ops, 5);
1091 if (Predicate_so_imm(N3.getNode())) {
1092 SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
1093 cast<ConstantSDNode>(N1)->getZExtValue()),
1095 Tmp1 = Transform_so_imm_XFORM(Tmp1.getNode());
1096 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1097 cast<ConstantSDNode>(N2)->getZExtValue()),
1099 SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
1100 return CurDAG->SelectNodeTo(Op.getNode(),
1101 ARM::MOVCCi, MVT::i32, Ops, 5);
1107 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
1108 // Emits: (MOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
1109 // Pattern complexity = 6 cost = 1 size = 0
1111 // Pattern: (ARMcmov:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
1112 // Emits: (tMOVCCr:i32 GPR:i32:$false, GPR:i32:$true, (imm:i32):$cc)
1113 // Pattern complexity = 6 cost = 11 size = 0
1115 // Also FCPYScc and FCPYDcc.
1116 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1117 cast<ConstantSDNode>(N2)->getZExtValue()),
1119 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
1121 switch (VT.getSimpleVT()) {
1122 default: assert(false && "Illegal conditional move type!");
1125 Opc = Subtarget->isThumb()
1126 ? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr)
1136 return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
1138 case ARMISD::CNEG: {
1139 MVT VT = Op.getValueType();
1140 SDValue N0 = Op.getOperand(0);
1141 SDValue N1 = Op.getOperand(1);
1142 SDValue N2 = Op.getOperand(2);
1143 SDValue N3 = Op.getOperand(3);
1144 SDValue InFlag = Op.getOperand(4);
1145 assert(N2.getOpcode() == ISD::Constant);
1146 assert(N3.getOpcode() == ISD::Register);
1148 SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
1149 cast<ConstantSDNode>(N2)->getZExtValue()),
1151 SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
1153 switch (VT.getSimpleVT()) {
1154 default: assert(false && "Illegal conditional move type!");
1163 return CurDAG->SelectNodeTo(Op.getNode(), Opc, VT, Ops, 5);
1166 case ISD::DECLARE: {
1167 SDValue Chain = Op.getOperand(0);
1168 SDValue N1 = Op.getOperand(1);
1169 SDValue N2 = Op.getOperand(2);
1170 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);
1171 // FIXME: handle VLAs.
1173 ReplaceUses(Op.getValue(0), Chain);
1176 if (N2.getOpcode() == ARMISD::PIC_ADD && isa<LoadSDNode>(N2.getOperand(0)))
1177 N2 = N2.getOperand(0);
1178 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N2);
1180 ReplaceUses(Op.getValue(0), Chain);
1183 SDValue BasePtr = Ld->getBasePtr();
1184 assert(BasePtr.getOpcode() == ARMISD::Wrapper &&
1185 isa<ConstantPoolSDNode>(BasePtr.getOperand(0)) &&
1186 "llvm.dbg.variable should be a constantpool node");
1187 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(BasePtr.getOperand(0));
1188 GlobalValue *GV = 0;
1189 if (CP->isMachineConstantPoolEntry()) {
1190 ARMConstantPoolValue *ACPV = (ARMConstantPoolValue*)CP->getMachineCPVal();
1193 GV = dyn_cast<GlobalValue>(CP->getConstVal());
1195 ReplaceUses(Op.getValue(0), Chain);
1199 SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
1200 TLI.getPointerTy());
1201 SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
1202 SDValue Ops[] = { Tmp1, Tmp2, Chain };
1203 return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
1204 MVT::Other, Ops, 3);
1207 case ISD::CONCAT_VECTORS: {
1208 MVT VT = Op.getValueType();
1209 assert(VT.is128BitVector() && Op.getNumOperands() == 2 &&
1210 "unexpected CONCAT_VECTORS");
1211 SDValue N0 = Op.getOperand(0);
1212 SDValue N1 = Op.getOperand(1);
1214 CurDAG->getTargetNode(TargetInstrInfo::IMPLICIT_DEF, dl, VT);
1215 if (N0.getOpcode() != ISD::UNDEF)
1216 Result = CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, VT,
1217 SDValue(Result, 0), N0,
1218 CurDAG->getTargetConstant(arm_dsubreg_0,
1220 if (N1.getOpcode() != ISD::UNDEF)
1221 Result = CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, VT,
1222 SDValue(Result, 0), N1,
1223 CurDAG->getTargetConstant(arm_dsubreg_1,
1228 case ISD::VECTOR_SHUFFLE: {
1229 MVT VT = Op.getValueType();
1231 // Match 128-bit splat to VDUPLANEQ. (This could be done with a Pat in
1232 // ARMInstrNEON.td but it is awkward because the shuffle mask needs to be
1233 // transformed first into a lane number and then to both a subregister
1234 // index and an adjusted lane number.) If the source operand is a
1235 // SCALAR_TO_VECTOR, leave it so it will be matched later as a VDUP.
1236 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1237 if (VT.is128BitVector() && SVOp->isSplat() &&
1238 Op.getOperand(0).getOpcode() != ISD::SCALAR_TO_VECTOR &&
1239 Op.getOperand(1).getOpcode() == ISD::UNDEF) {
1240 unsigned LaneVal = SVOp->getSplatIndex();
1244 switch (VT.getVectorElementType().getSimpleVT()) {
1245 default: assert(false && "unhandled VDUP splat type");
1246 case MVT::i8: Opc = ARM::VDUPLN8q; HalfVT = MVT::v8i8; break;
1247 case MVT::i16: Opc = ARM::VDUPLN16q; HalfVT = MVT::v4i16; break;
1248 case MVT::i32: Opc = ARM::VDUPLN32q; HalfVT = MVT::v2i32; break;
1249 case MVT::f32: Opc = ARM::VDUPLNfq; HalfVT = MVT::v2f32; break;
1252 // The source operand needs to be changed to a subreg of the original
1253 // 128-bit operand, and the lane number needs to be adjusted accordingly.
1254 unsigned NumElts = VT.getVectorNumElements() / 2;
1255 unsigned SRVal = (LaneVal < NumElts ? arm_dsubreg_0 : arm_dsubreg_1);
1256 SDValue SR = CurDAG->getTargetConstant(SRVal, MVT::i32);
1257 SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, MVT::i32);
1258 SDNode *SubReg = CurDAG->getTargetNode(TargetInstrInfo::EXTRACT_SUBREG,
1259 dl, HalfVT, N->getOperand(0), SR);
1260 return CurDAG->SelectNodeTo(N, Opc, VT, SDValue(SubReg, 0), NewLane);
1267 return SelectCode(Op);
1270 bool ARMDAGToDAGISel::
1271 SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
1272 std::vector<SDValue> &OutOps) {
1273 assert(ConstraintCode == 'm' && "unexpected asm memory constraint");
1275 SDValue Base, Offset, Opc;
1276 if (!SelectAddrMode2(Op, Op, Base, Offset, Opc))
1279 OutOps.push_back(Base);
1280 OutOps.push_back(Offset);
1281 OutOps.push_back(Opc);
1285 /// createARMISelDag - This pass converts a legalized DAG into a
1286 /// ARM-specific DAG, ready for instruction scheduling.
1288 FunctionPass *llvm::createARMISelDag(ARMBaseTargetMachine &TM) {
1289 return new ARMDAGToDAGISel(TM);