//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
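
// An x86 memory operand has the general form
//   segment:[base + scale*index + disp]
// where scale is 1, 2, 4 or 8 and disp is a 32-bit displacement (or a
// symbolic address). The address matcher below accumulates the pieces of
// that form while walking the DAG.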
/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDValue's instead of register numbers for the leaves of the matched
/// tree.
struct X86ISelAddressMode {
  enum {
    RegBase,
    FrameIndexBase
  } BaseType;

  struct {            // This is really a union, discriminated by BaseType!
    SDValue Reg;
    int FrameIndex;
  } Base;

  unsigned Scale;
  SDValue IndexReg;
  int32_t Disp;
  SDValue Segment;
  const GlobalValue *GV;
  const Constant *CP;
  const BlockAddress *BlockAddr;
  const char *ES;
  int JT;
  unsigned Align;    // CP alignment.
  unsigned char SymbolFlags;  // X86II::MO_*

  X86ISelAddressMode()
    : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
      Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
      SymbolFlags(X86II::MO_NO_FLAG) {
  }
  bool hasSymbolicDisplacement() const {
    return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
  }

  bool hasBaseOrIndexReg() const {
    return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
  }

  /// isRIPRelative - Return true if this addressing mode is already RIP
  /// relative.
  bool isRIPRelative() const {
    if (BaseType != RegBase) return false;
    if (RegisterSDNode *RegNode =
          dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
      return RegNode->getReg() == X86::RIP;
    return false;
  }

  void setBaseReg(SDValue Reg) {
    BaseType = RegBase;
    Base.Reg = Reg;
  }
  void dump() {
    dbgs() << "X86ISelAddressMode " << this << '\n';
    dbgs() << "Base.Reg ";
    if (Base.Reg.getNode() != 0)
      Base.Reg.getNode()->dump();
    else
      dbgs() << "nul";
    dbgs() << " Base.FrameIndex " << Base.FrameIndex << '\n'
           << " Scale " << Scale << '\n'
           << "IndexReg ";
    if (IndexReg.getNode() != 0)
      IndexReg.getNode()->dump();
    else
      dbgs() << "nul";
    dbgs() << " Disp " << Disp << '\n';
    dbgs() << " JT" << JT << " Align" << Align << '\n';
  }
};
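
/// X86ISelListener - DAG update listener used while matching addresses.
/// Replacing nodes during matching can delete other nodes the matcher still
/// holds on to; the deletions are recorded here so MatchAddressRecursively
/// can bail out instead of touching a freed node.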
class X86ISelListener : public SelectionDAG::DAGUpdateListener {
  SmallSet<SDNode*, 4> Deletes;
public:
  explicit X86ISelListener() {}
  virtual void NodeDeleted(SDNode *N, SDNode *E) {
    Deletes.insert(N);
  }
  virtual void NodeUpdated(SDNode *N) {
    // Nothing to do.
  }
  bool IsDeleted(SDNode *N) {
    return Deletes.count(N);
  }
};
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
class X86DAGToDAGISel : public SelectionDAGISel {
  /// X86Lowering - This object fully describes how to lower LLVM code to an
  /// X86-specific SelectionDAG.
  const X86TargetLowering &X86Lowering;

  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// OptForSize - If true, selector should try to optimize for code size
  /// instead of performance.
  bool OptForSize;

public:
  explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel),
      X86Lowering(*tm.getTargetLowering()),
      Subtarget(&tm.getSubtarget<X86Subtarget>()),
      OptForSize(false) {}

  virtual const char *getPassName() const {
    return "X86 DAG->DAG Instruction Selection";
  }

  virtual void EmitFunctionEntryCode();

  virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

  virtual void PreprocessISelDAG();

  // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

private:
  SDNode *Select(SDNode *N);
  SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
  SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);

  bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
  bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
  bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
  bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
  bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                               X86ISelListener &DeadNodes,
                               unsigned Depth);
  bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
  bool SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
                  SDValue &Scale, SDValue &Index, SDValue &Disp,
                  SDValue &Segment);
  bool SelectLEAAddr(SDNode *Op, SDValue N, SDValue &Base,
                     SDValue &Scale, SDValue &Index, SDValue &Disp);
  bool SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
                         SDValue &Scale, SDValue &Index, SDValue &Disp);
  bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                           SDValue &Base, SDValue &Scale,
                           SDValue &Index, SDValue &Disp,
                           SDValue &Segment,
                           SDValue &NodeWithChain);

  bool TryFoldLoad(SDNode *P, SDValue N,
                   SDValue &Base, SDValue &Scale,
                   SDValue &Index, SDValue &Disp,
                   SDValue &Segment);

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions.
  virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                            char ConstraintCode,
                                            std::vector<SDValue> &OutOps);
  void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
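
  /// getAddressOperands - Translate a matched X86ISelAddressMode into the
  /// five operands (base, scale, index, displacement, segment) that x86
  /// memory instructions take.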
  inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
    Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
      CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
      AM.Base.Reg;
    Scale = getI8Imm(AM.Scale);
    Index = AM.IndexReg;
    // These are 32-bit even in 64-bit mode since RIP relative offset
    // is 32-bit.
    if (AM.GV)
      Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
                                            AM.SymbolFlags);
    else if (AM.CP)
      Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                           AM.Align, AM.Disp, AM.SymbolFlags);
    else if (AM.ES)
      Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
    else if (AM.JT != -1)
      Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
    else if (AM.BlockAddr)
      Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
                                     true, AM.SymbolFlags);
    else
      Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

    if (AM.Segment.getNode())
      Segment = AM.Segment;
    else
      Segment = CurDAG->getRegister(0, MVT::i32);
  }
  /// getI8Imm - Return a target constant with the specified value, of type
  /// i8.
  inline SDValue getI8Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i8);
  }

  /// getI16Imm - Return a target constant with the specified value, of type
  /// i16.
  inline SDValue getI16Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i16);
  }

  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
  inline SDValue getI32Imm(unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, MVT::i32);
  }

  /// getGlobalBaseReg - Return an SDNode that returns the value of
  /// the global base register. Output instructions required to
  /// initialize the global base register, if necessary.
  ///
  SDNode *getGlobalBaseReg();

  /// getTargetMachine - Return a reference to the TargetMachine, casted
  /// to the target-specific type.
  const X86TargetMachine &getTargetMachine() {
    return static_cast<const X86TargetMachine &>(TM);
  }

  /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
  /// to the target-specific type.
  const X86InstrInfo *getInstrInfo() {
    return getTargetMachine().getInstrInfo();
  }
};
bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD: case X86ISD::SUB: case X86ISD::AND:
    case X86ISD::XOR: case X86ISD::OR:
    case ISD::ADD: case ISD::ADDC: case ISD::ADDE:
    case ISD::AND: case ISD::OR: case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. If the increment is 1, the saving
      // can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // movl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will
      // save a load.
      // FIXME: This is probably also true for non TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}
/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
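/// This lets instruction selection fold the callee-address load directly
/// into the call instruction instead of first materializing the address in
/// a register.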
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
}
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         N->getOpcode() == X86ISD::TC_RETURN)) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }
    // Lower fpround and fpextend nodes that target the FP stack to be store
    // and load to the stack. This is a gross hack. We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass. We would like dag combine to be able to hack on these between the
    // call expansion and the node legalization. As such this pass basically
    // does "really late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        NULL, 0, MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, DebugLoc(),
            TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}
bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
                                              X86ISelAddressMode &AM) {
  assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
  SDValue Segment = N.getOperand(0);

  if (AM.Segment.getNode() == 0) {
    AM.Segment = Segment;
    return false;
  }

  return true;
}

bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf

  SDValue Address = N.getOperand(1);
  if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
      !MatchSegmentBaseAddress(Address, AM))
    return false;

  return true;
}
/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise
/// it returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel) &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
      !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      int64_t Offset = AM.Disp + G->getOffset();
      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
      AM.GV = G->getGlobal();
      AM.Disp = Offset;
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      int64_t Offset = AM.Disp + CP->getOffset();
      if (!X86::isOffsetSuitableForCodeModel(Offset, M)) return true;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp = Offset;
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }
  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
  // mode, this results in a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
       TM.getRelocationModel() == Reloc::Static)) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }
    return false;
  }

  return true;
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  X86ISelListener DeadNodes;
  if (MatchAddressRecursively(N, AM, DeadNodes, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base.Reg.getNode() == 0) {
    AM.Base.Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base.Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base.Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
/// isLogicallyAddWithConstant - Return true if this node is semantically an
/// add of a value with a constantint.
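/// For example, (or (shl X, 3), 5) behaves as (add (shl X, 3), 5), because
/// the shift guarantees that the low three bits of its result are zero.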
static bool isLogicallyAddWithConstant(SDValue V, SelectionDAG *CurDAG) {
  // Check for (add x, Cst)
  if (V->getOpcode() == ISD::ADD)
    return isa<ConstantSDNode>(V->getOperand(1));

  // Check for (or x, Cst), where Cst & x == 0.
  if (V->getOpcode() != ISD::OR ||
      !isa<ConstantSDNode>(V->getOperand(1)))
    return false;

  // Handle "X | C" as "X + C" iff X is known to have C bits clear.
  ConstantSDNode *CN = cast<ConstantSDNode>(V->getOperand(1));

  // Check to see if the LHS & C is zero.
  return CurDAG->MaskedValueIsZero(V->getOperand(0), CN->getAPIntValue());
}
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              X86ISelListener &DeadNodes,
                                              unsigned Depth) {
  bool is64Bit = Subtarget->is64Bit();
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  CodeModel::Model M = TM.getCodeModel();

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
      int64_t Val = AM.Disp + Cst->getSExtValue();
      if (X86::isOffsetSuitableForCodeModel(Val, M,
                                            AM.hasSymbolicDisplacement())) {
        AM.Disp = Val;
        return false;
      }
    }
    return true;
  }
  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!is64Bit ||
        X86::isOffsetSuitableForCodeModel(AM.Disp + Val, M,
                                          AM.hasSymbolicDisplacement())) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::SegmentBaseAddress:
    if (!MatchSegmentBaseAddress(N, AM))
      return false;
    break;

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoad(N, AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (isLogicallyAddWithConstant(ShVal, CurDAG)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getSExtValue() << Val);
          if (!is64Bit ||
              X86::isOffsetSuitableForCodeModel(Disp, M,
                                                AM.hasSymbolicDisplacement()))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
      break;
    }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
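    // e.g. 5*X becomes base + 4*index with both legs tied to X, which fits
    // in a single lea.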
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getSExtValue() *
                                      CN->getZExtValue();
            if (!is64Bit ||
                X86::isOffsetSuitableForCodeModel(Disp, M,
                                                  AM.hasSymbolicDisplacement()))
              AM.Disp = Disp;
            else
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;
  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction; however,
    // it costs an additional mov if the index register has other uses.

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM,
                                DeadNodes, Depth+1) ||
        // If it is successful but the recursive update causes N to be deleted,
        // then it's not safe to continue.
        DeadNodes.IsDeleted(N.getNode())) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = N.getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base.Reg.getNode() &&
         !AM.Base.Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;
    // Insert the new nodes into the topological ordering.
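    // New nodes are created with node id -1; position them before N so the
    // selection loop, which visits nodes in topological order by id, still
    // processes them.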
    if (Zero.getNode()->getNodeId() == -1 ||
        Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Zero.getNode());
      Zero.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    if (Neg.getNode()->getNodeId() == -1 ||
        Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Neg.getNode());
      Neg.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    return false;
  }
  case ISD::ADD: {
    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM,
                                 DeadNodes, Depth+1)) {
      if (DeadNodes.IsDeleted(N.getNode()))
        // If it is successful but the recursive update causes N to be deleted,
        // then it's not safe to continue.
        return true;
      if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM,
                                   DeadNodes, Depth+1))
        // If it is successful but the recursive update causes N to be deleted,
        // then it's not safe to continue.
        return DeadNodes.IsDeleted(N.getNode());
    }

    // Try again after commuting the operands.
    AM = Backup;
    if (!MatchAddressRecursively(N.getNode()->getOperand(1), AM,
                                 DeadNodes, Depth+1)) {
      if (DeadNodes.IsDeleted(N.getNode()))
        // If it is successful but the recursive update causes N to be deleted,
        // then it's not safe to continue.
        return true;
      if (!MatchAddressRecursively(N.getNode()->getOperand(0), AM,
                                   DeadNodes, Depth+1))
        // If it is successful but the recursive update causes N to be deleted,
        // then it's not safe to continue.
        return DeadNodes.IsDeleted(N.getNode());
    }
    AM = Backup;
    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base.Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      AM.Base.Reg = N.getNode()->getOperand(0);
      AM.IndexReg = N.getNode()->getOperand(1);
      AM.Scale = 1;
      return false;
    }
    break;
  }
  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (isLogicallyAddWithConstant(N, CurDAG)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));
      uint64_t Offset = CN->getSExtValue();

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, DeadNodes, Depth+1) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          (!is64Bit ||
           X86::isOffsetSuitableForCodeModel(AM.Disp + Offset, M,
                                             AM.hasSymbolicDisplacement()))) {
        AM.Disp += Offset;
        return false;
      }
      AM = Backup;
    }
    break;
  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    SDValue Shift = N.getOperand(0);
    if (Shift.getNumOperands() != 2) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue X = Shift.getOperand(0);
    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Handle "(X >> (8-C1)) & C2" as "(X >> 8) & 0xff)" if safe. This
    // allows us to convert the shift and and into an h-register extract and
    // a scaled index.
    if (Shift.getOpcode() == ISD::SRL && Shift.hasOneUse()) {
      unsigned ScaleLog = 8 - C1->getZExtValue();
      if (ScaleLog > 0 && ScaleLog < 4 &&
          C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
        SDValue Eight = CurDAG->getConstant(8, MVT::i8);
        SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
        SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                      X, Eight);
        SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
                                      Srl, Mask);
        SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
        SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                      And, ShlCount);
        // Insert the new nodes into the topological ordering.
        if (Eight.getNode()->getNodeId() == -1 ||
            Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Eight.getNode());
          Eight.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Mask.getNode()->getNodeId() == -1 ||
            Mask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), Mask.getNode());
          Mask.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Srl.getNode()->getNodeId() == -1 ||
            Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
          CurDAG->RepositionNode(Shift.getNode(), Srl.getNode());
          Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
        }
        if (And.getNode()->getNodeId() == -1 ||
            And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), And.getNode());
          And.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        if (ShlCount.getNode()->getNodeId() == -1 ||
            ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
          CurDAG->RepositionNode(X.getNode(), ShlCount.getNode());
          ShlCount.getNode()->setNodeId(X.getNode()->getNodeId());
        }
        if (Shl.getNode()->getNodeId() == -1 ||
            Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
          CurDAG->RepositionNode(N.getNode(), Shl.getNode());
          Shl.getNode()->setNodeId(N.getNode()->getNodeId());
        }
        CurDAG->ReplaceAllUsesWith(N, Shl, &DeadNodes);
        AM.IndexReg = And;
        AM.Scale = (1 << ScaleLog);
        return false;
      }
    }
    // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
    if (Shift.getOpcode() != ISD::SHL) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
                                     NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       NewAND, SDValue(C1, 0));

    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT, &DeadNodes);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }
  return MatchAddressBase(N, AM);
}
/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}
/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDNode *Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.getNode())
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternNodeWithChain: this is the matched node that has a chain input
///   and chain output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(Root, LD->getBasePtr(), Base, Scale, Index, Disp,
                      Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Root, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}
/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDNode *Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't
  // support segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or
  // with a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDNode *Op, SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  SDValue Segment;
  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
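
/// TryFoldLoad - Return true if the load N can be folded as a memory operand
/// of P, filling in the address operands if so.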
bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
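
/// FindCallStartFromCall - Walk up the chain from a call node until the
/// CALLSEQ_START that begins its call sequence is found.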
static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).getNode());
}
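
/// SelectAtomic64 - Select the pseudo instruction Opc for a 64-bit atomic
/// operation; the operands are the address, the value pair In2L/In2H, and
/// the chain.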
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(In1.getNode(), In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_add_and_fetch and
  // __sync_sub_and_fetch where the result is not used. This allows us
  // to use "lock" version of add, sub, inc, dec instructions.
  // FIXME: Do not use special instructions but instead add the "lock"
  // prefix to the target node somehow. The extra information will then be
  // transferred to machine instruction and it denotes the prefix.
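  // e.g. an atomic add of the constant 1 whose result is unused selects to
  // a single "lock inc" of the memory operand.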
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Ptr.getNode(), Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }
  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (Predicate_immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (Predicate_immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (Predicate_immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (Predicate_immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (Predicate_immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (Predicate_i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (Predicate_immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (Predicate_i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }
  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6),
                          0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7),
                          0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}
/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
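/// Narrowing a test changes which bit the hardware treats as the sign bit,
/// so the transform is only safe when no user reads SF; unsigned and
/// equality conditions (above/below/equal/parity) qualify.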
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}
SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
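        // (AH, BH, CH and DH cannot be encoded in an instruction that
        // carries a REX prefix.)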
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                                Result,
                                   CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                MVT::i8, Result);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX16rm8, dl, MVT::i16,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX16rr8, dl, MVT::i16, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Flag, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                                Result,
                                  CurDAG->getTargetConstant(8, MVT::i8)),
                         0);
        // Then truncate it down to i8.
        Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                MVT::i8, Result);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }
  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8"
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          TargetRegisterClass *TRC = 0;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }
      // For example, "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        TargetRegisterClass *TRC = 0;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT_HI, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. No special NOREX tricks are needed since there's
        // only one GPR operand!
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }
      // For example, "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_16BIT, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
      }

      // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_32BIT, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
      }
    }
    break;
  }
  }
  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op.getNode(), Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}
/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}