//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/Statistic.h"
#include <algorithm>
#include <set>
using namespace llvm;
STATISTIC(NumFPKill   , "Number of FP_REG_KILL instructions added");
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDOperand's instead of register numbers for the leaves of the matched
/// tree.
struct X86ISelAddressMode {
  enum {
    RegBase,
    FrameIndexBase
  } BaseType;

  struct {            // This is really a union, discriminated by BaseType!
    SDOperand Reg;
    int FrameIndex;
  } Base;

  bool isRIPRel;      // RIP relative?
  unsigned Scale;
  SDOperand IndexReg;
  unsigned Disp;
  GlobalValue *GV;
  Constant *CP;
  const char *ES;
  int JT;
  unsigned Align;     // CP alignment.

  X86ISelAddressMode()
    : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
      GV(0), CP(0), ES(0), JT(-1), Align(0) {
  }
};
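// Illustrative example (not from the original source): for a memory
// reference such as "movl 12(%ebx,%ecx,4), %eax", MatchAddress would fill
// this in roughly as
//   BaseType = RegBase, Base.Reg = %ebx, Scale = 4, IndexReg = %ecx,
//   Disp = 12, with GV/CP/ES/JT left empty.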
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
  /// ContainsFPCode - Every instruction we select that uses or defines a FP
  /// register should set this to true.
  bool ContainsFPCode;

  /// FastISel - Enable fast(er) instruction selection.
  bool FastISel;

  /// TM - Keep a reference to X86TargetMachine.
  X86TargetMachine &TM;

  /// X86Lowering - This object fully describes how to lower LLVM code to an
  /// X86-specific SelectionDAG.
  X86TargetLowering X86Lowering;

  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// GlobalBaseReg - keeps track of the virtual register mapped onto the
  /// global base register.
  unsigned GlobalBaseReg;

public:
  X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
    : SelectionDAGISel(X86Lowering),
      ContainsFPCode(false), FastISel(fast), TM(tm),
      X86Lowering(*TM.getTargetLowering()),
      Subtarget(&TM.getSubtarget<X86Subtarget>()) {}
  virtual bool runOnFunction(Function &Fn) {
    // Make sure we re-emit a set of the global base reg if necessary
    GlobalBaseReg = 0;
    return SelectionDAGISel::runOnFunction(Fn);
  }

  virtual const char *getPassName() const {
    return "X86 DAG->DAG Instruction Selection";
  }

  /// InstructionSelectBasicBlock - This callback is invoked by
  /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
  virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

  virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

  virtual bool CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root);

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

private:
  SDNode *Select(SDOperand N);

  bool MatchAddress(SDOperand N, X86ISelAddressMode &AM,
                    bool isRoot = true, unsigned Depth = 0);
  bool SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
                  SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
  bool SelectLEAAddr(SDOperand Op, SDOperand N, SDOperand &Base,
                     SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
  bool SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
                           SDOperand N, SDOperand &Base, SDOperand &Scale,
                           SDOperand &Index, SDOperand &Disp,
                           SDOperand &InChain, SDOperand &OutChain);
  bool TryFoldLoad(SDOperand P, SDOperand N,
                   SDOperand &Base, SDOperand &Scale,
                   SDOperand &Index, SDOperand &Disp);
  void InstructionSelectPreprocess(SelectionDAG &DAG);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op,
                                            char ConstraintCode,
                                            std::vector<SDOperand> &OutOps,
                                            SelectionDAG &DAG);

  void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
  inline void getAddressOperands(X86ISelAddressMode &AM, SDOperand &Base,
                                 SDOperand &Scale, SDOperand &Index,
                                 SDOperand &Disp) {
    Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
      CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
      AM.Base.Reg;
    Scale = getI8Imm(AM.Scale);
    Index = AM.IndexReg;
    // These are 32-bit even in 64-bit mode since RIP relative offset
    // is 32-bit.
    if (AM.GV)
      Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
    else if (AM.CP)
      Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Align, AM.Disp);
    else if (AM.ES)
      Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
    else if (AM.JT != -1)
      Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
    else
      Disp = getI32Imm(AM.Disp);
  }
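  // Illustrative note (not from the original source): every x86 memory
  // reference is emitted as the 4-tuple (Base, Scale, Index, Disp), so e.g.
  // "(%esi,%edi,2)" becomes Base=%esi, Scale=2, Index=%edi, Disp=0, with
  // unused components set to register 0 or immediate 0.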
  /// getI8Imm - Return a target constant with the specified value, of type
  /// i8.
  inline SDOperand getI8Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i8);
  }

  /// getI16Imm - Return a target constant with the specified value, of type
  /// i16.
  inline SDOperand getI16Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i16);
  }

  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
  inline SDOperand getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
  /// base register. Return the virtual register that holds this value.
  SDNode *getGlobalBaseReg();

#ifndef NDEBUG
  unsigned Indent;
#endif
};
/// findFlagUse - Return the use of the MVT::Flag value produced by the
/// specified node, or NULL if there is none.
static SDNode *findFlagUse(SDNode *N) {
  unsigned FlagResNo = N->getNumValues()-1;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
      SDOperand Op = User->getOperand(i);
      if (Op.Val == N && Op.ResNo == FlagResNo)
        return User;
    }
  }
  return NULL;
}
static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                          SDNode *Root, SDNode *Skip, bool &found,
                          std::set<SDNode *> &Visited) {
  if (found ||
      Use->getNodeId() > Def->getNodeId() ||
      !Visited.insert(Use).second)
    return;

  for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
    SDNode *N = Use->getOperand(i).Val;
    if (N == Skip)
      continue;
    if (N == Def) {
      if (Use == ImmedUse)
        continue;  // Immediate use is ok.
      if (Use == Root) {
        // The root use reaching the def through a chain is ok for the
        // load/op/store and load/op/cmp cases handled by the callers.
        assert(Use->getOpcode() == ISD::STORE ||
               Use->getOpcode() == X86ISD::CMP);
        continue;
      }
      found = true;
      break;
    }
    findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
  }
}
/// isNonImmUse - Start searching from Root up the DAG to check if Def can
/// be reached. Return true if that's the case. However, ignore direct uses
/// by ImmedUse (which would be U in the example illustrated in
/// CanBeFoldedBy) and by Root (which can happen in the store case).
/// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically since we only fold loads which
/// have one non-chain use, we only need to watch out for load/op/store
/// and load/op/cmp case where the root (store / cmp) may reach the load via
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
                               SDNode *Skip = NULL) {
  std::set<SDNode *> Visited;
  bool found = false;
  findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
  return found;
}
bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) {
  if (FastISel) return false;

  // If U can somehow reach N through another path, then U can't fold N or
  // it will create a cycle. e.g. In the following diagram, U can reach N
  // through X. If N is folded into U, then X is both a predecessor and
  // a successor of U.
  //
  //         [ N ]
  //         ^  ^
  //         |  |
  //        /    \---
  //      /        [X]
  //      |         ^
  //     [U]--------|
  if (isNonImmUse(Root, N, U))
    return false;

  // If U produces a flag, then it gets (even more) interesting. Since it
  // would have been "glued" together with its flag use (FU), typically
  // through a TokenFactor (TF), we need to check if FU can reach N as well.
  //
  //         [ N ]
  //          ^ ^
  //          | |
  //         [U] \--
  //          ^     \
  //          |     [TF]
  //           \     ^
  //            \    |
  //             [FU]
  //
  // If FU (flag use) indirectly reaches N (the load), and U folds N (call it
  // NU), then TF is a predecessor of FU and a successor of NU. But since
  // NU and FU are flagged together, this effectively creates a cycle.
  bool HasFlagUse = false;
  MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1);
  while ((VT == MVT::Flag && !Root->use_empty())) {
    SDNode *FU = findFlagUse(Root);
    if (FU == NULL)
      break;
    Root = FU;
    HasFlagUse = true;
    VT = Root->getValueType(Root->getNumValues()-1);
  }

  if (HasFlagUse)
    return !isNonImmUse(Root, N, Root, U);
  return true;
}
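// Illustrative example (not from the original source): given
//   %x = load %p
//   %y = add %x, 1
//   store %y, %p
// the store may only fold the add/load into a single read-modify-write
// instruction because the store reaches the load solely through the chain;
// if some other value path from the store also reached the load, folding
// would turn the DAG into a cyclic graph.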
/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
/// and move load below the TokenFactor. Replace store's chain operand with
/// load's chain result.
static void MoveBelowTokenFactor(SelectionDAG &DAG, SDOperand Load,
                                 SDOperand Store, SDOperand TF) {
  std::vector<SDOperand> Ops;
  for (unsigned i = 0, e = TF.Val->getNumOperands(); i != e; ++i)
    if (Load.Val == TF.Val->getOperand(i).Val)
      Ops.push_back(Load.Val->getOperand(0));
    else
      Ops.push_back(TF.Val->getOperand(i));
  DAG.UpdateNodeOperands(TF, &Ops[0], Ops.size());
  DAG.UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
  DAG.UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
                         Store.getOperand(2), Store.getOperand(3));
}
/// InstructionSelectPreprocess - Preprocess the DAG to allow the instruction
/// selector to pick more load-modify-store instructions. This is a common
/// case:
///
///     [Load chain]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /         |
/// [TokenFactor] [Op]
///     ^          ^
///     |          |
///      \        /
///       \      /
///       [Store]
///
/// The fact the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it to:
///
///     [Load chain]
///         ^
///         |
///    [TokenFactor]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /         |
///    [Op]       |
///     ^         |
///     |         |
///      \       /
///       \     /
///       [Store]
void X86DAGToDAGISel::InstructionSelectPreprocess(SelectionDAG &DAG) {
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
         E = DAG.allnodes_end(); I != E; ++I) {
    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDOperand Chain = I->getOperand(0);
    if (Chain.Val->getOpcode() != ISD::TokenFactor)
      continue;

    SDOperand N1 = I->getOperand(1);
    SDOperand N2 = I->getOperand(2);
    if (MVT::isFloatingPoint(N1.getValueType()) ||
        MVT::isVector(N1.getValueType()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDOperand Load;
    unsigned Opcode = N1.Val->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE: {
      // Commutative operators: the load may be either operand.
      SDOperand N10 = N1.getOperand(0);
      SDOperand N11 = N1.getOperand(1);
      if (ISD::isNON_EXTLoad(N10.Val))
        RModW = true;
      else if (ISD::isNON_EXTLoad(N11.Val)) {
        RModW = true;
        std::swap(N10, N11);
      }
      RModW = RModW && N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
        (N10.getOperand(1) == N2) &&
        (N10.Val->getValueType(0) == N1.getValueType());
      if (RModW)
        Load = N10;
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      // Non-commutative operators: the load must be the first operand.
      SDOperand N10 = N1.getOperand(0);
      if (ISD::isNON_EXTLoad(N10.Val))
        RModW = N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
          (N10.getOperand(1) == N2) &&
          (N10.Val->getValueType(0) == N1.getValueType());
      if (RModW)
        Load = N10;
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(DAG, Load, SDOperand(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}
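// Illustrative result (not from the original source): after this
// preprocessing, a sequence such as
//   t = load [mem]; t2 = add t, %reg; store t2, [mem]
// can be selected as a single read-modify-write instruction like
//   addl %reg, (mem)
// instead of separate load, add, and store instructions.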
/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
  MachineFunction::iterator FirstMBB = BB;

  if (!FastISel)
    InstructionSelectPreprocess(DAG);

  // Codegen the basic block.
#ifndef NDEBUG
  DOUT << "===== Instruction selection begins:\n";
  Indent = 0;
#endif
  DAG.setRoot(SelectRoot(DAG.getRoot()));
#ifndef NDEBUG
  DOUT << "===== Instruction selection ends:\n";
#endif

  DAG.RemoveDeadNodes();

  // Emit machine code to BB. This can change 'BB' to the last block being
  // inserted into.
  ScheduleAndEmitDAG(DAG);

  // If we are emitting FP stack code, scan the basic block to determine if this
  // block defines any FP values. If so, put an FP_REG_KILL instruction before
  // the terminator of the block.
  if (!Subtarget->hasSSE2()) {
    // Note that FP stack instructions *are* used in SSE code when returning
    // values, but these are not live out of the basic block, so we don't need
    // an FP_REG_KILL in this case either.
    bool ContainsFPCode = false;

    // Scan all of the machine instructions in these MBBs, checking for FP
    // stores.
    MachineFunction::iterator MBBI = FirstMBB;
    do {
      for (MachineBasicBlock::iterator I = MBBI->begin(), E = MBBI->end();
           !ContainsFPCode && I != E; ++I) {
        for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
          if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
              MRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
              RegMap->getRegClass(I->getOperand(op).getReg()) ==
                X86::RFPRegisterClass) {
            ContainsFPCode = true;
            break;
          }
        }
      }
    } while (!ContainsFPCode && &*(MBBI++) != BB);

    // Check PHI nodes in successor blocks. These PHI's will be lowered to have
    // a copy of the input value in this block.
    if (!ContainsFPCode) {
      // Final check, check LLVM BB's that are successors to the LLVM BB
      // corresponding to BB for FP PHI nodes.
      const BasicBlock *LLVMBB = BB->getBasicBlock();
      const PHINode *PN;
      for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
           !ContainsFPCode && SI != E; ++SI) {
        for (BasicBlock::const_iterator II = SI->begin();
             (PN = dyn_cast<PHINode>(II)); ++II) {
          if (PN->getType()->isFloatingPoint()) {
            ContainsFPCode = true;
            break;
          }
        }
      }
    }

    // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
    if (ContainsFPCode) {
      BuildMI(*BB, BB->getFirstTerminator(),
              TM.getInstrInfo()->get(X86::FP_REG_KILL));
      ++NumFPKill;
    }
  }
}
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");

  // Switch the FPU to 64-bit precision mode for better compatibility and speed.
  int CWFrameIdx = MFI->CreateStackObject(2, 2);
  addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);

  // Set the high part to be 64-bit precision.
  addFrameReference(BuildMI(BB, TII->get(X86::MOV8mi)),
                    CWFrameIdx, 1).addImm(2);

  // Reload the modified control word now.
  addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);
}
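// Illustrative note (not from the original source): the three instructions
// emitted above correspond roughly to the asm sequence
//   fnstcw (slot)         ; save the current FPU control word to the stack
//   movb   $2, 1+(slot)   ; rewrite the control word's high byte
//   fldcw  (slot)         ; reload the modified control word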
void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
                                   bool isRoot, unsigned Depth) {
  if (Depth > 5) {
    // Default, generate it as a register.
    AM.BaseType = X86ISelAddressMode::RegBase;
    AM.Base.Reg = N;
    return false;
  }

  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRel) {
    if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
      int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
      if (isInt32(AM.Disp + Val)) {
        AM.Disp += Val;
        return false;
      }
    }
    return true;
  }

  int id = N.Val->getNodeId();
  bool Available = isSelected(id);
  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
    if (isInt32(AM.Disp + Val)) {
      AM.Disp += Val;
      return false;
    }
    break;
  }
  case X86ISD::Wrapper: {
    bool is64Bit = Subtarget->is64Bit();
    // Under X86-64 non-small code model, GV (and friends) are 64-bits.
    if (is64Bit && TM.getCodeModel() != CodeModel::Small)
      break;
    if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
      break;
    // If the value is not yet available in a register, or both base and index
    // components have already been picked, we can't fit the result in a
    // register of the addressing mode. Duplicate the GlobalAddress or
    // ConstantPool as a displacement instead.
    if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
      bool isStatic = TM.getRelocationModel() == Reloc::Static;
      SDOperand N0 = N.getOperand(0);
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
        GlobalValue *GV = G->getGlobal();
        bool isAbs32 = !is64Bit || isStatic;
        if (isAbs32 || isRoot) {
          AM.GV = GV;
          AM.Disp += G->getOffset();
          AM.isRIPRel = !isAbs32;
          return false;
        }
      } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
        if (!is64Bit || isStatic || isRoot) {
          AM.CP = CP->getConstVal();
          AM.Align = CP->getAlignment();
          AM.Disp += CP->getOffset();
          AM.isRIPRel = !isStatic;
          return false;
        }
      } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
        if (isStatic || isRoot) {
          AM.ES = S->getSymbol();
          AM.isRIPRel = !isStatic;
          return false;
        }
      } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
        if (isStatic || isRoot) {
          AM.JT = J->getIndex();
          AM.isRIPRel = !isStatic;
          return false;
        }
      }
    }
    break;
  }
  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;
  case ISD::SHL:
    if (!Available && AM.IndexReg.Val == 0 && AM.Scale == 1)
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
        unsigned Val = CN->getValue();
        if (Val == 1 || Val == 2 || Val == 3) {
          AM.Scale = 1 << Val;
          SDOperand ShVal = N.Val->getOperand(0);

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
              isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
            AM.IndexReg = ShVal.Val->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(ShVal.Val->getOperand(1));
            uint64_t Disp = AM.Disp + (AddVal->getValue() << Val);
            if (isInt32(Disp))
              AM.Disp = Disp;
            else
              AM.IndexReg = ShVal;
          } else {
            AM.IndexReg = ShVal;
          }
          return false;
        }
      }
    break;
  case ISD::MUL:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (!Available &&
        AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.Val == 0 &&
        AM.IndexReg.Val == 0) {
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
        if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
          AM.Scale = unsigned(CN->getValue())-1;

          SDOperand MulVal = N.Val->getOperand(0);
          SDOperand Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
            Reg = MulVal.Val->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.Val->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getValue() * CN->getValue();
            if (isInt32(Disp))
              AM.Disp = Disp;
            else
              Reg = N.Val->getOperand(0);
          } else {
            Reg = N.Val->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;
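  // Illustrative example (not from the original source): with this
  // decomposition, (mul %reg, 9) can be selected as a single
  //   leal (%reg,%reg,8), %dest
  // since base + index*8 computes reg*9 without a multiply instruction.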
  case ISD::ADD: {
    // Try matching (lhs, rhs) first, then (rhs, lhs); restore AM on failure.
    X86ISelAddressMode Backup = AM;
    if (!MatchAddress(N.Val->getOperand(0), AM, false, Depth+1) &&
        !MatchAddress(N.Val->getOperand(1), AM, false, Depth+1))
      return false;
    AM = Backup;
    if (!MatchAddress(N.Val->getOperand(1), AM, false, Depth+1) &&
        !MatchAddress(N.Val->getOperand(0), AM, false, Depth+1))
      return false;
    AM = Backup;
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      // Start with the LHS as an addr mode.
      if (!MatchAddress(N.getOperand(0), AM, false) &&
          // The address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          isInt32(AM.Disp + CN->getSignExtended()) &&
          // Check to see if the LHS & C is zero.
          TLI.MaskedValueIsZero(N.getOperand(0), CN->getValue())) {
        AM.Disp += CN->getValue();
        return false;
      }
      AM = Backup;
    }
    break;
  }
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.Val == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}
/// SelectAddr - returns true if it is able to pattern match an addressing mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
                                 SDOperand &Scale, SDOperand &Index,
                                 SDOperand &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT::ValueType VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.Val)
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.Val)
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp);
  return true;
}
/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
}
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
                                          SDOperand N, SDOperand &Base,
                                          SDOperand &Scale, SDOperand &Index,
                                          SDOperand &Disp, SDOperand &InChain,
                                          SDOperand &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.Val) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        CanBeFoldedBy(N.Val, Pred.Val, Op.Val)) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == ISD::VECTOR_SHUFFLE && N.Val->hasOneUse() &&
      N.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
      N.getOperand(1).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(1).Val->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(1).getOperand(0).Val) &&
      N.getOperand(1).getOperand(0).hasOneUse()) {
    // Check to see if the BUILD_VECTOR is building a zero vector.
    SDOperand BV = N.getOperand(0);
    for (unsigned i = 0, e = BV.getNumOperands(); i != e; ++i)
      if (!isZeroNode(BV.getOperand(i)) &&
          BV.getOperand(i).getOpcode() != ISD::UNDEF)
        return false;  // Not a zero/undef vector.
    // Check to see if the shuffle mask is 4/L/L/L or 2/L, where L is something
    // else.
    unsigned VecWidth = BV.getNumOperands();
    SDOperand ShufMask = N.getOperand(2);
    assert(ShufMask.getOpcode() == ISD::BUILD_VECTOR && "Invalid shuf mask!");
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(ShufMask.getOperand(0))) {
      if (C->getValue() == VecWidth) {
        for (unsigned i = 1; i != VecWidth; ++i) {
          if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF) {
            // ok.
          } else {
            ConstantSDNode *C = cast<ConstantSDNode>(ShufMask.getOperand(i));
            if (C->getValue() >= VecWidth) return false;
          }
        }

        // Okay, this is a zero extending load. Fold it.
        LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(1).getOperand(0));
        if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
          return false;
        OutChain = LD->getChain();
        InChain = SDOperand(LD, 1);
        return true;
      }
    }
  }
  return false;
}
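// Illustrative example (not from the original source): this lets a pattern
// like (v4f32 (scalar_to_vector (load addr))) be selected as
//   movss (addr), %xmm0
// which loads one f32 and zeros the upper elements, so no separate vector
// load and shuffle are needed.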
/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N,
                                    SDOperand &Base, SDOperand &Scale,
                                    SDOperand &Index, SDOperand &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT::ValueType VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.Val)
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.Val)
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.Val || AM.IndexReg.Val))
    Complexity++;

  if (Complexity > 2) {
    getAddressOperands(AM, Base, Scale, Index, Disp);
    return true;
  }
  return false;
}
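// Illustrative example (not from the original source): an address such as
// 4(%ebx,%ecx,8) scores Complexity = 3 (base reg + index reg + disp), so it
// is emitted as
//   leal 4(%ebx,%ecx,8), %eax
// whereas a plain (%ebx) stays a simple add/move rather than a LEA.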
bool X86DAGToDAGISel::TryFoldLoad(SDOperand P, SDOperand N,
                                  SDOperand &Base, SDOperand &Scale,
                                  SDOperand &Index, SDOperand &Disp) {
  if (ISD::isNON_EXTLoad(N.Val) &&
      N.hasOneUse() &&
      CanBeFoldedBy(N.Val, P.Val, P.Val))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
  return false;
}
/// getGlobalBaseReg - Output the instructions required to put the
/// base address to use for accessing globals into a register.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  assert(!Subtarget->is64Bit() && "X86-64 PIC uses RIP relative addressing");
  if (!GlobalBaseReg) {
    // Insert the set of GlobalBaseReg into the first MBB of the function
    MachineBasicBlock &FirstMBB = BB->getParent()->front();
    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
    SSARegMap *RegMap = BB->getParent()->getSSARegMap();
    unsigned PC = RegMap->createVirtualRegister(X86::GR32RegisterClass);

    const TargetInstrInfo *TII = TM.getInstrInfo();
    BuildMI(FirstMBB, MBBI, TII->get(X86::MovePCtoStack));
    BuildMI(FirstMBB, MBBI, TII->get(X86::POP32r), PC);

    // If we're using vanilla 'GOT' PIC style, we should use relative addressing
    // not to pc, but to the _GLOBAL_OFFSET_TABLE_ external.
    if (TM.getRelocationModel() == Reloc::PIC_ &&
        Subtarget->isPICStyleGOT()) {
      GlobalBaseReg = RegMap->createVirtualRegister(X86::GR32RegisterClass);
      BuildMI(FirstMBB, MBBI, TII->get(X86::ADD32ri), GlobalBaseReg).
        addReg(PC).
        addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
    } else {
      GlobalBaseReg = PC;
    }
  }
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
}
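// Illustrative note (not from the original source): on 32-bit x86 the
// sequence built above corresponds to
//   call  .Lnext       ; MovePCtoStack: push the return address
// .Lnext:
//   popl  %reg         ; POP32r: %reg now holds the PC
//   addl  $_GLOBAL_OFFSET_TABLE_, %reg   ; only for GOT-style PIC
// after which %reg serves as the PIC base for global accesses.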
static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).Val);
}
SDNode *X86DAGToDAGISel::Select(SDOperand N) {
  SDNode *Node = N.Val;
  MVT::ValueType NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();

#ifndef NDEBUG
  DOUT << std::string(Indent, ' ') << "Selecting: ";
  DEBUG(Node->dump(CurDAG));
  DOUT << "\n";
  Indent += 2;
#endif

  if (Opcode >= ISD::BUILTIN_OP_END && Opcode < X86ISD::FIRST_NUMBER) {
#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "== ";
    DEBUG(Node->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif
    return NULL;  // Already selected.
  }

  switch (Opcode) {
    default: break;
    case X86ISD::GlobalBaseReg:
      return getGlobalBaseReg();
    case ISD::ADD: {
      // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
      // code and is matched first so to prevent it from being turned into
      // LEA32r X+c.
      // In 64-bit mode, use LEA to take advantage of RIP-relative addressing.
      MVT::ValueType PtrVT = TLI.getPointerTy();
      SDOperand N0 = N.getOperand(0);
      SDOperand N1 = N.getOperand(1);
      if (N.Val->getValueType(0) == PtrVT &&
          N0.getOpcode() == X86ISD::Wrapper &&
          N1.getOpcode() == ISD::Constant) {
        unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getValue();
        SDOperand C(0, 0);
        // TODO: handle ExternalSymbolSDNode.
        if (GlobalAddressSDNode *G =
            dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
          C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
                                             G->getOffset() + Offset);
        } else if (ConstantPoolSDNode *CP =
                   dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
          C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
                                            CP->getAlignment(),
                                            CP->getOffset()+Offset);
        }

        if (C.Val) {
          if (Subtarget->is64Bit()) {
            SDOperand Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
                                CurDAG->getRegister(0, PtrVT), C };
            return CurDAG->SelectNodeTo(N.Val, X86::LEA64r, MVT::i64, Ops, 4);
          } else
            return CurDAG->SelectNodeTo(N.Val, X86::MOV32ri, PtrVT, C);
        }
      }

      // Other cases are handled by auto-generated code.
      break;
    }
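    // Illustrative example (not from the original source): for
    // (add (Wrapper @g), 16) the case above emits
    //   movl $g+16, %eax          ; 32-bit
    //   leaq g+16(%rip), %rax     ; 64-bit, RIP-relative
    // folding the global's address and the constant into one operand.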
    case ISD::MULHU:
    case ISD::MULHS: {
      if (Opcode == ISD::MULHU)
        switch (NVT) {
        default: assert(0 && "Unsupported VT!");
        case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
        case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
        case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
        case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
        }
      else
        switch (NVT) {
        default: assert(0 && "Unsupported VT!");
        case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
        case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
        case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
        case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
        }

      unsigned LoReg, HiReg;
      switch (NVT) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
      case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
      case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
      case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
      }

      SDOperand N0 = Node->getOperand(0);
      SDOperand N1 = Node->getOperand(1);

      bool foldedLoad = false;
      SDOperand Tmp0, Tmp1, Tmp2, Tmp3;
      foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
      // MULHU and MULHS are commutative
      if (!foldedLoad) {
        foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
        if (foldedLoad) {
          N0 = Node->getOperand(1);
          N1 = Node->getOperand(0);
        }
      }

      SDOperand Chain;
      if (foldedLoad) {
        Chain = N1.getOperand(0);
        AddToISelQueue(Chain);
      } else
        Chain = CurDAG->getEntryNode();

      SDOperand InFlag(0, 0);
      AddToISelQueue(N0);
      Chain  = CurDAG->getCopyToReg(Chain, CurDAG->getRegister(LoReg, NVT),
                                    N0, InFlag);
      InFlag = Chain.getValue(1);

      if (foldedLoad) {
        AddToISelQueue(Tmp0);
        AddToISelQueue(Tmp1);
        AddToISelQueue(Tmp2);
        AddToISelQueue(Tmp3);
        SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Chain, InFlag };
        SDNode *CNode =
          CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
        Chain  = SDOperand(CNode, 0);
        InFlag = SDOperand(CNode, 1);
      } else {
        AddToISelQueue(N1);
        InFlag =
          SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
      }

      SDOperand Result = CurDAG->getCopyFromReg(Chain, HiReg, NVT, InFlag);
      ReplaceUses(N.getValue(0), Result);
      if (foldedLoad)
        ReplaceUses(N1.getValue(1), Result.getValue(1));

#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.Val->dump(CurDAG));
      DOUT << "\n";
      Indent -= 2;
#endif
      return NULL;
    }
    case ISD::SDIV:
    case ISD::UDIV:
    case ISD::SREM:
    case ISD::UREM: {
      bool isSigned = Opcode == ISD::SDIV || Opcode == ISD::SREM;
      bool isDiv    = Opcode == ISD::SDIV || Opcode == ISD::UDIV;
      if (!isSigned)
        switch (NVT) {
        default: assert(0 && "Unsupported VT!");
        case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
        case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
        case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
        case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
        }
      else
        switch (NVT) {
        default: assert(0 && "Unsupported VT!");
        case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
        case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
        case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
        case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
        }

      unsigned LoReg, HiReg;
      unsigned ClrOpcode, SExtOpcode;
      switch (NVT) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:
        LoReg = X86::AL;  HiReg = X86::AH;
        ClrOpcode  = 0;
        SExtOpcode = X86::CBW;
        break;
      case MVT::i16:
        LoReg = X86::AX;  HiReg = X86::DX;
        ClrOpcode  = X86::MOV16r0;
        SExtOpcode = X86::CWD;
        break;
      case MVT::i32:
        LoReg = X86::EAX; HiReg = X86::EDX;
        ClrOpcode  = X86::MOV32r0;
        SExtOpcode = X86::CDQ;
        break;
      case MVT::i64:
        LoReg = X86::RAX; HiReg = X86::RDX;
        ClrOpcode  = X86::MOV64r0;
        SExtOpcode = X86::CQO;
        break;
      }

      SDOperand N0 = Node->getOperand(0);
      SDOperand N1 = Node->getOperand(1);
      SDOperand InFlag(0, 0);
      if (NVT == MVT::i8 && !isSigned) {
        // Special case for div8, just use a move with zero extension to AX to
        // clear the upper 8 bits (AH).
        SDOperand Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
        if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
          SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
          AddToISelQueue(N0.getOperand(0));
          AddToISelQueue(Tmp0);
          AddToISelQueue(Tmp1);
          AddToISelQueue(Tmp2);
          AddToISelQueue(Tmp3);
          Move =
            SDOperand(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
                                            Ops, 5), 0);
          Chain = Move.getValue(1);
          ReplaceUses(N0.getValue(1), Chain);
        } else {
          AddToISelQueue(N0);
          Move =
            SDOperand(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
          Chain = CurDAG->getEntryNode();
        }
        Chain  = CurDAG->getCopyToReg(Chain, X86::AX, Move, InFlag);
        InFlag = Chain.getValue(1);
      } else {
        AddToISelQueue(N0);
        InFlag =
          CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg, N0,
                               InFlag).getValue(1);
        if (isSigned) {
          // Sign extend the low part into the high part.
          InFlag =
            SDOperand(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
        } else {
          // Zero out the high part, effectively zero extending the input.
          SDOperand ClrNode = SDOperand(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
          InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg, ClrNode,
                                        InFlag).getValue(1);
        }
      }

      SDOperand Tmp0, Tmp1, Tmp2, Tmp3, Chain;
      bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
      if (foldedLoad) {
        AddToISelQueue(N1.getOperand(0));
        AddToISelQueue(Tmp0);
        AddToISelQueue(Tmp1);
        AddToISelQueue(Tmp2);
        AddToISelQueue(Tmp3);
        SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
        SDNode *CNode =
          CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
        Chain  = SDOperand(CNode, 0);
        InFlag = SDOperand(CNode, 1);
      } else {
        AddToISelQueue(N1);
        Chain = CurDAG->getEntryNode();
        InFlag =
          SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
      }

      SDOperand Result =
        CurDAG->getCopyFromReg(Chain, isDiv ? LoReg : HiReg, NVT, InFlag);
      ReplaceUses(N.getValue(0), Result);
      if (foldedLoad)
        ReplaceUses(N1.getValue(1), Result.getValue(1));

#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.Val->dump(CurDAG));
      DOUT << "\n";
      Indent -= 2;
#endif

      return NULL;
    }
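    // Illustrative example (not from the original source): for a 32-bit
    // signed division the sequence built above corresponds to
    //   movl  %dividend, %eax
    //   cltd                 ; CDQ: sign-extend EAX into EDX
    //   idivl %divisor       ; divide EDX:EAX by the divisor
    // with the quotient read from EAX (SDIV) or the remainder from EDX (SREM).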
    case ISD::TRUNCATE: {
      if (!Subtarget->is64Bit() && NVT == MVT::i8) {
        unsigned Opc2;
        MVT::ValueType VT;
        switch (Node->getOperand(0).getValueType()) {
        default: assert(0 && "Unknown truncate!");
        case MVT::i16:
          Opc = X86::MOV16to16_;
          VT = MVT::i16;
          Opc2 = X86::TRUNC_16_to8;
          break;
        case MVT::i32:
          Opc = X86::MOV32to32_;
          VT = MVT::i32;
          Opc2 = X86::TRUNC_32_to8;
          break;
        }

        AddToISelQueue(Node->getOperand(0));
        SDOperand Tmp =
          SDOperand(CurDAG->getTargetNode(Opc, VT, Node->getOperand(0)), 0);
        SDNode *ResNode = CurDAG->getTargetNode(Opc2, NVT, Tmp);

#ifndef NDEBUG
        DOUT << std::string(Indent-2, ' ') << "=> ";
        DEBUG(ResNode->dump(CurDAG));
        DOUT << "\n";
        Indent -= 2;
#endif
        return ResNode;
      }

      break;
    }
  }
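  // Illustrative note (not from the original source): the extra
  // MOV16to16_/MOV32to32_ copy exists because on 32-bit x86 only
  // EAX/EBX/ECX/EDX have addressable low 8-bit subregisters; the copy
  // constrains the value into such a register before the 8-bit truncate.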
  SDNode *ResNode = SelectCode(N);

#ifndef NDEBUG
  DOUT << std::string(Indent-2, ' ') << "=> ";
  if (ResNode == NULL || ResNode == N.Val)
    DEBUG(N.Val->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DOUT << "\n";
  Indent -= 2;
#endif

  return ResNode;
}
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDOperand &Op, char ConstraintCode,
                             std::vector<SDOperand> &OutOps, SelectionDAG &DAG){
  SDOperand Op0, Op1, Op2, Op3;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  AddToISelQueue(Op0);
  AddToISelQueue(Op1);
  AddToISelQueue(Op2);
  AddToISelQueue(Op3);
  return false;
}
/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
  return new X86DAGToDAGISel(TM, Fast);
}