//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to an X86 dag.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*
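    // Taken together these fields describe the general x86 memory operand
    // form segment:[Base + Scale*Index + Disp], where the displacement may
    // instead be a symbolic reference (GV, CP, ES, JT, or BlockAddr).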
    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }
    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n';
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}
    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
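    // For example, 0xFFFFFFFF80000000 (INT32_MIN sign-extended to 64 bits)
    // passes, while 0x0000000080000000 (2^31) does not: its low 32 bits
    // sign-extend to a different 64-bit value.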
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

    // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"
  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectGather(SDNode *N, unsigned Opc);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);
    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }
    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();
    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }
  };
}  // end anonymous namespace
bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // movl    %gs:0, %ecx
      // leal    (%ecx, %eax), %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}
/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
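  // In outline: splice the load out of the token chain that feeds the call's
  // chain operand, re-route the load to take the chain that enters the call
  // sequence, and finally make the call consume the load's output chain.
  // This leaves the load directly below the call so it can be folded into
  // the call instruction.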
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  unsigned NumOps = Call.getNode()->getNumOperands();
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = NumOps; i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps);
}
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain; this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->getFnAttributes().hasOptimizeForSizeAttr();

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }
    // Lower fpround and fpextend nodes that target the FP stack to be store and
    // load to the stack.  This is a gross hack.  We would like to simply mark
    // these as being illegal, but when we do that, legalize produces these when
    // it expands calls, then expands these in the same legalize pass.  We would
    // like dag combine to be able to hack on these between the call expansion
    // and the node legalization.  As such this pass basically does "really
    // late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
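    // For example, an f64-to-f32 FP_ROUND whose source is on the x87 stack
    // but whose result is wanted in an SSE register becomes a truncating
    // f32 store to a stack slot followed by an extending load, built below.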
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}
static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}
bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}
bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
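  // In practice this lets the thread-pointer load "movl %gs:0, %eax" be
  // selected as an ordinary load whose segment register is GS and whose
  // displacement is 0.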
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}
/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode.  These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;
  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses.  We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }
  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode.  In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}
// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This
// allows us to convert the shift and and into an h-register extract and
// a scaled index. Returns false if the simplification is performed.
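// For example, "(x >> 5) & 0x7f8" (ScaleLog = 3) is rewritten as
// "((x >> 8) & 0xff) << 3"; the final shl is then absorbed into the
// addressing mode as scale 8 with index "(x >> 8) & 0xff".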
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
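// For example, "(x << 1) & 0xff00" becomes "(x & 0x7f80) << 1"; the shl then
// folds into the addressing mode as scale 2 with index "x & 0x7f80".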
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %ecx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
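// For example, with a zero-extended i16 value, (and (srl x, 9), 0x7c)
// becomes (shl (srl x, 11), 2), which folds as scale 4 with index
// (srl x, 11); this is exactly the "(%rsi,%rcx,4)" form shown above.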
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = CountLeadingZeros_64(Mask);
  unsigned MaskTZ = CountTrailingZeros_64(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that the mask is a contiguous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;
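  // (e.g. Mask = 0x7f8: MaskTZ = 3, 8 trailing ones once the zeros are
  // shifted out, and MaskLZ = 53; 8 + 3 + 53 == 64, so the run is
  // contiguous.)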
  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;
  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  DebugLoc DL = N.getDebugLoc();
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}
bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it.  Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }
  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;
  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it down.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
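    // e.g. x*9 becomes a single "leaq (%rax,%rax,8), %rcx": base = index = x
    // with scale 8.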
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;
  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field is left unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction; however,
    // it costs an additional mov if the index register has other uses.
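    // In effect this rewrites A-B as A+(-B): -B is materialized with a neg
    // instruction and placed in the otherwise-unused index register with
    // scale 1.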
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }
  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }
  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;
  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getValueSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}
/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}
/// SelectAddr - returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode it
/// can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched.  It
/// is always a load, store, atomic node, or null.  It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes is all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
/// match a load whose top elements are either undef or zeros.  The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   chain output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements.  This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load.  Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}
/// SelectLEAAddr - This calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}
bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);

  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}
/// Atomic opcode table
///
enum AtomicOpc {
  ADD,
  SUB,
  INC,
  DEC,
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
    X86::LOCK_ADD64mi32,
    X86::LOCK_SUB64mi32,
    X86::LOCK_AND64mi32,
    X86::LOCK_XOR64mi32,
};
// Return the target constant operand for atomic-load-op and do simple
// translations, such as from atomic-load-add to lock-sub. The return value is
// one of the following 3 cases:
// + target-constant, the operand could be supported as a target constant.
// + empty, the operand is not needed any more with the new op selected.
// + non-empty, otherwise.
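// For example, an atomic-load-add of the constant -1 is translated to DEC
// and returns an empty SDValue, so the selected instruction is a
// "lock dec"-style RMW with no immediate operand.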
static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
                                                DebugLoc dl,
                                                enum AtomicOpc &Op, EVT NVT,
                                                SDValue Val) {
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
    int64_t CNVal = CN->getSExtValue();
    // Quit if not 32-bit imm.
    if ((int32_t)CNVal != CNVal)
      return Val;
    // For atomic-load-add, we could do some optimizations.
    if (Op == ADD) {
      // Translate to INC/DEC if ADD by 1 or -1.
      if ((CNVal == 1) || (CNVal == -1)) {
        Op = (CNVal == 1) ? INC : DEC;
        // No more constant operand after being translated into INC/DEC.
        return SDValue();
      }
      // Translate to SUB if ADD by negative value.
      if (CNVal < 0) {
        Op = SUB;
        CNVal = -CNVal;
      }
    }
    return CurDAG->getTargetConstant(CNVal, NVT);
  }

  // If the value operand is single-used, try to optimize it.
  if (Op == ADD && Val.hasOneUse()) {
    // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
    if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
      Op = SUB;
      return Val.getOperand(1);
    }

    // A special case for i16, which needs truncating as, in most cases, it's
    // promoted to i32. We will translate
    // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x)).
    if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
        Val.getOperand(0).getOpcode() == ISD::SUB &&
        X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
      Op = SUB;
      Val = Val.getOperand(0);
      return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
                                            Val.getOperand(1));
    }
  }

  return Val;
}
SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  DebugLoc dl = Node->getDebugLoc();

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Which index into the table.
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
  default:
    return 0;
  case ISD::ATOMIC_LOAD_OR:
    Op = OR;
    break;
  case ISD::ATOMIC_LOAD_AND:
    Op = AND;
    break;
  case ISD::ATOMIC_LOAD_XOR:
    Op = XOR;
    break;
  case ISD::ATOMIC_LOAD_ADD:
    Op = ADD;
    break;
  }
  Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val);
  bool isUnOp = !Val.getNode();
  bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);

  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isCN)
      Opc = AtomicOpcTbl[Op][ConstantI8];
    else
      Opc = AtomicOpcTbl[Op][I8];
    break;
  case MVT::i16:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI16];
      else
        Opc = AtomicOpcTbl[Op][ConstantI16];
    } else
      Opc = AtomicOpcTbl[Op][I16];
    break;
  case MVT::i32:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI32];
      else
        Opc = AtomicOpcTbl[Op][ConstantI32];
    } else
      Opc = AtomicOpcTbl[Op][I32];
    break;
  case MVT::i64:
    Opc = AtomicOpcTbl[Op][I64];
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI64];
      else if (i64immSExt32(Val.getNode()))
        Opc = AtomicOpcTbl[Op][ConstantI64];
    }
    break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");
  SDValue Ret;
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isUnOp) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
  }
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}
/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
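/// (Signed conditions such as SETG/SETL or JG/JL do read SF and OF; any such
/// use falls through to the conservative "return false" default below.)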
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}
/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
/// is suitable for doing the {load; increment or decrement; store} to modify
/// transformation.
static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
                                SDValue StoredVal, SelectionDAG *CurDAG,
                                LoadSDNode* &LoadNode, SDValue &InputChain) {
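  // In C terms this matches patterns like "(*p)++": a load and a store of
  // the same address whose only intervening use is a single INC or DEC,
  // which can then be selected as one read-modify-write instruction such
  // as "incl (%rax)".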
  // is the value stored the result of a DEC or INC?
  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;

  // is the stored value result 0 of the load?
  if (StoredVal.getResNo() != 0) return false;

  // are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);
  // is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
  EVT LdVT = LoadNode->getMemoryVT();
  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
      LdVT != MVT::i8)
    return false;

  // Is store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand. Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();
1887 bool ChainCheck = false;
1888 if (Chain == Load.getValue(1)) {
1890 InputChain = LoadNode->getChain();
1891 } else if (Chain.getOpcode() == ISD::TokenFactor) {
1892 SmallVector<SDValue, 4> ChainOps;
1893 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
1894 SDValue Op = Chain.getOperand(i);
1895 if (Op == Load.getValue(1)) {
1900 // Make sure using Op as part of the chain would not cause a cycle here.
1901 // In theory, we could check whether the chain node is a predecessor of
1902 // the load. But that can be very expensive. Instead visit the uses and
1903 // make sure they all have smaller node id than the load.
1904 int LoadId = LoadNode->getNodeId();
1905 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
1906 UE = UI->use_end(); UI != UE; ++UI) {
1907 if (UI.getUse().getResNo() != 0)
1909 if (UI->getNodeId() > LoadId)
1913 ChainOps.push_back(Op);
1917 // Make a new TokenFactor with all the other input chains except
1919 InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
1920 MVT::Other, &ChainOps[0], ChainOps.size());
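
// Illustrative example (editorial, not from the original source): this
// recognizes DAGs produced by code such as
//   void bump(long *p) { *p += 1; }
// where the legalized DAG is store(INC(load p), p).  When every check above
// passes, the ISD::STORE handling in Select() below folds the three nodes
// into a single read-modify-write instruction such as "incq (%rdi)".
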
/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
  if (Opc == X86ISD::DEC) {
    if (LdVT == MVT::i64) return X86::DEC64m;
    if (LdVT == MVT::i32) return X86::DEC32m;
    if (LdVT == MVT::i16) return X86::DEC16m;
    if (LdVT == MVT::i8)  return X86::DEC8m;
  } else {
    assert(Opc == X86ISD::INC && "unrecognized opcode");
    if (LdVT == MVT::i64) return X86::INC64m;
    if (LdVT == MVT::i32) return X86::INC32m;
    if (LdVT == MVT::i16) return X86::INC16m;
    if (LdVT == MVT::i8)  return X86::INC8m;
  }
  llvm_unreachable("unrecognized size for LdVT");
}
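
// Usage sketch (editorial): for a 32-bit X86ISD::INC this returns
// X86::INC32m, i.e. the memory form "incl (mem)", which is exactly the
// opcode the fused {load; inc/dec; store} path in Select() emits.
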
/// SelectGather - Customized ISel for GATHER operations.
///
SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
  // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
  SDValue Chain = Node->getOperand(0);
  SDValue VSrc = Node->getOperand(2);
  SDValue Base = Node->getOperand(3);
  SDValue VIdx = Node->getOperand(4);
  SDValue VMask = Node->getOperand(5);
  ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
  if (!Scale)
    return 0;

  SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
                                   MVT::Other);

  // Memory Operands: Base, Scale, Index, Disp, Segment
  SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
  SDValue Segment = CurDAG->getRegister(0, MVT::i32);
  const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
                          Disp, Segment, VMask, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           VTs, Ops, array_lengthof(Ops));
  // Node has 2 outputs: VDst and MVT::Other.
  // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
  // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
  // of ResNode.
  ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
  ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
  return ResNode;
}
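
// Editorial note: the result renumbering above skips ResNode's result 1,
// the mask write-back.  Roughly:
//   Node:    (VDst, chain)            -> results 0, 1
//   ResNode: (VDst, VMask_wb, chain)  -> results 0, 1, 2
// so uses of Node's chain (result 1) must be rewired to ResNode's result 2.
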
SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);

  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default: break;
    case Intrinsic::x86_avx2_gather_d_pd:
    case Intrinsic::x86_avx2_gather_d_pd_256:
    case Intrinsic::x86_avx2_gather_q_pd:
    case Intrinsic::x86_avx2_gather_q_pd_256:
    case Intrinsic::x86_avx2_gather_d_ps:
    case Intrinsic::x86_avx2_gather_d_ps_256:
    case Intrinsic::x86_avx2_gather_q_ps:
    case Intrinsic::x86_avx2_gather_q_ps_256:
    case Intrinsic::x86_avx2_gather_d_q:
    case Intrinsic::x86_avx2_gather_d_q_256:
    case Intrinsic::x86_avx2_gather_q_q:
    case Intrinsic::x86_avx2_gather_q_q_256:
    case Intrinsic::x86_avx2_gather_d_d:
    case Intrinsic::x86_avx2_gather_d_d_256:
    case Intrinsic::x86_avx2_gather_q_d:
    case Intrinsic::x86_avx2_gather_q_d_256: {
      unsigned Opc;
      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_avx2_gather_d_pd:     Opc = X86::VGATHERDPDrm;  break;
      case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
      case Intrinsic::x86_avx2_gather_q_pd:     Opc = X86::VGATHERQPDrm;  break;
      case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
      case Intrinsic::x86_avx2_gather_d_ps:     Opc = X86::VGATHERDPSrm;  break;
      case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
      case Intrinsic::x86_avx2_gather_q_ps:     Opc = X86::VGATHERQPSrm;  break;
      case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
      case Intrinsic::x86_avx2_gather_d_q:      Opc = X86::VPGATHERDQrm;  break;
      case Intrinsic::x86_avx2_gather_d_q_256:  Opc = X86::VPGATHERDQYrm; break;
      case Intrinsic::x86_avx2_gather_q_q:      Opc = X86::VPGATHERQQrm;  break;
      case Intrinsic::x86_avx2_gather_q_q_256:  Opc = X86::VPGATHERQQYrm; break;
      case Intrinsic::x86_avx2_gather_d_d:      Opc = X86::VPGATHERDDrm;  break;
      case Intrinsic::x86_avx2_gather_d_d_256:  Opc = X86::VPGATHERDDYrm; break;
      case Intrinsic::x86_avx2_gather_q_d:      Opc = X86::VPGATHERQDrm;  break;
      case Intrinsic::x86_avx2_gather_q_d_256:  Opc = X86::VPGATHERQDYrm; break;
      }
      SDNode *RetVal = SelectGather(Node, Opc);
      if (RetVal)
        // We already called ReplaceUses inside SelectGather.
        return NULL;
      break;
    }
    }
    break;
  }
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
  case X86ISD::ATOMXOR64_DAG:
  case X86ISD::ATOMADD64_DAG:
  case X86ISD::ATOMSUB64_DAG:
  case X86ISD::ATOMNAND64_DAG:
  case X86ISD::ATOMAND64_DAG:
  case X86ISD::ATOMMAX64_DAG:
  case X86ISD::ATOMMIN64_DAG:
  case X86ISD::ATOMUMAX64_DAG:
  case X86ISD::ATOMUMIN64_DAG:
  case X86ISD::ATOMSWAP64_DAG: {
    unsigned Opc;
    switch (Opcode) {
    default: llvm_unreachable("Impossible opcode");
    case X86ISD::ATOMOR64_DAG:   Opc = X86::ATOMOR6432;   break;
    case X86ISD::ATOMXOR64_DAG:  Opc = X86::ATOMXOR6432;  break;
    case X86ISD::ATOMADD64_DAG:  Opc = X86::ATOMADD6432;  break;
    case X86ISD::ATOMSUB64_DAG:  Opc = X86::ATOMSUB6432;  break;
    case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
    case X86ISD::ATOMAND64_DAG:  Opc = X86::ATOMAND6432;  break;
    case X86ISD::ATOMMAX64_DAG:  Opc = X86::ATOMMAX6432;  break;
    case X86ISD::ATOMMIN64_DAG:  Opc = X86::ATOMMIN6432;  break;
    case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break;
    case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break;
    case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
    }
    SDNode *RetVal = SelectAtomic64(Node, Opc);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a
    // smaller encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();

    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
    uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
    if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
      break;

    unsigned ShlOp, Op;
    EVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;

    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op =  X86::OR32ri8; break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0), NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
  }
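
  // Illustrative example (editorial): for "(x << 12) | 0x3000" the immediate
  // 0x3000 needs a 4-byte encoding, but rewriting it as "(x | 0x3) << 12"
  // lets the ri8 form encode the constant in a single byte:
  //   orq  $3, %rax
  //   shlq $12, %rax
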
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg, Opc;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r;  break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return NULL;
  }
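
  // Editorial note: one-operand MUL reads its implicit operand from
  // AL/AX/EAX/RAX, so N0 is first copied into LoReg and the glue value
  // threads that physical-register dependency into the multiply.  For i32
  // this is roughly:
  //   copy N0 -> EAX
  //   mull N1          // EDX:EAX = EAX * N1, flags in EFLAGS
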
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned Opc, MOpc;
    bool isSigned = Opcode == ISD::SMUL_LOHI;
    bool hasBMI2 = Subtarget->hasBMI2();
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
                     MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
      case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
                     MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
    switch (Opc) {
    default: llvm_unreachable("Unknown MUL opcode!");
    case X86::IMUL8r:
    case X86::MUL8r:
      SrcReg = LoReg = X86::AL; HiReg = X86::AH;
      break;
    case X86::IMUL16r:
    case X86::MUL16r:
      SrcReg = LoReg = X86::AX; HiReg = X86::DX;
      break;
    case X86::IMUL32r:
    case X86::MUL32r:
      SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
      break;
    case X86::IMUL64r:
    case X86::MUL64r:
      SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
      break;
    case X86::MULX32rr:
      SrcReg = X86::EDX; LoReg = HiReg = 0;
      break;
    case X86::MULX64rr:
      SrcReg = X86::RDX; LoReg = HiReg = 0;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
                                          N0, SDValue()).getValue(1);
    SDValue ResHi, ResLo;

    if (foldedLoad) {
      SDValue Chain;
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops,
                                               array_lengthof(Ops));
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        Chain = SDValue(CNode, 2);
        InFlag = SDValue(CNode, 3);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops,
                                               array_lengthof(Ops));
        Chain = SDValue(CNode, 0);
        InFlag = SDValue(CNode, 1);
      }

      // Update the chain.
      ReplaceUses(N1.getValue(1), Chain);
    } else {
      SDValue Ops[] = { N1, InFlag };
      if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops,
                                               array_lengthof(Ops));
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        InFlag = SDValue(CNode, 2);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops,
                                               array_lengthof(Ops));
        InFlag = SDValue(CNode, 0);
      }
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      if (ResLo.getNode() == 0) {
        assert(LoReg && "Register for low half is not defined!");
        ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
                                       InFlag);
        InFlag = ResLo.getValue(2);
      }
      ReplaceUses(SDValue(Node, 0), ResLo);
      DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      if (ResHi.getNode() == 0) {
        assert(HiReg && "Register for high half is not defined!");
        ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
                                       InFlag);
        InFlag = ResHi.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), ResHi);
      DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }
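
  // Editorial note on the AH special case above: in 64-bit mode an
  // instruction carrying a REX prefix cannot encode AH/BH/CH/DH, so a copy
  // out of AH is unencodable if the destination needs REX (e.g. R8B).  The
  // high half of an i8 multiply is therefore recovered as, roughly:
  //   shrw $8, %ax        // move AH down into the low byte of AX
  //   (extract sub_8bit)  // then read the low 8 bits of AX
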
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned Opc, MOpc;
    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }

    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                 CurDAG->getTargetConstant(8, MVT::i8)),
                       0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return NULL;
  }
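
  // Editorial example: for an unsigned i8 divide this path emits, roughly,
  //   movzbl %cl, %eax    // zero-extend the dividend into EAX, clearing AH
  //   divb   %bl          // AL = quotient (result 0), AH = remainder (1)
  // and the two CopyFromReg blocks above pull AL/AH out only if used.
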
  case X86ISD::CMP:
  case X86ISD::SUB: {
    // Sometimes a SUB is used to perform comparison.
    if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
      // This node is not a CMP.
      break;
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;

      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          const TargetRegisterClass *TRC;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return NULL;
      }

      // For example, convert "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        const TargetRegisterClass *TRC;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.  The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // constrained to match.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
                                                 MVT::i32, Subreg, ShiftedImm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return NULL;
      }

      // For example, convert "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return NULL;
      }

      // For example, convert "testq %rax, $268468232" to
      // "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
                                                 Subreg, Imm);
        // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
        // one, do not call ReplaceAllUsesWith.
        ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return NULL;
      }
    }
    break;
  }
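
  // Editorial note: each narrowing above is gated on
  // HasNoSignedComparisonUses because shrinking the TEST moves the sign
  // flag, e.g.:
  //   testl $0x80, %eax   // SF <- bit 31 of %eax (always clear here)
  //   testb $0x80, %al    // SF <- bit 7 of %eax
  // Unsigned and equality users (SETE, JA, CMOVB, ...) never read SF or OF,
  // so for them the two forms are interchangeable.
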
  case ISD::STORE: {
    // Change a chain of {load; incr or dec; store} of the same value into
    // a simple increment or decrement through memory of that value, if the
    // uses of the modified value and its address are suitable.
    //
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used. (This also applies to
    // {INC,DEC}X{64,32,16,8}.)  We'll need to improve tablegen to allow flags
    // to be transferred from a node in the pattern to the result node,
    // probably with a new keyword. For example, we have this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (implicit EFLAGS)]>;
    // but may need something like this:
    //   def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //                   [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //                    (transferrable EFLAGS)]>;
    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue StoredVal = StoreNode->getOperand(1);
    unsigned Opc = StoredVal->getOpcode();

    LoadSDNode *LoadNode = 0;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;

    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    EVT LdVT = LoadNode->getMemoryVT();
    unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
                                                   Node->getDebugLoc(),
                                                   MVT::i32, MVT::Other, Ops,
                                                   array_lengthof(Ops));
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
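
  // Illustrative result (editorial): instead of the three-instruction
  // sequence
  //   movq (%rdi), %rax
  //   decq %rax
  //   movq %rax, (%rdi)
  // the store is selected as a single "decq (%rdi)".  Result 0 (MVT::i32)
  // is the EFLAGS value of the memory-form DEC, and result 1 is the chain,
  // which is why the two ReplaceUses calls above are crossed.
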
  // FIXME: Custom handling because TableGen doesn't support multiple implicit
  // defs in an instruction pattern.
  case X86ISD::PCMPESTRI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);
    SDValue N3 = Node->getOperand(3);
    SDValue N4 = Node->getOperand(4);

    // Make sure the last argument is a constant.
    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N4);
    if (!Cst)
      break;

    uint64_t Imm = Cst->getZExtValue();

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                                          X86::EAX, N1, SDValue()).getValue(1);
    InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
                                  N3, InFlag).getValue(1);

    SDValue Ops[] = { N0, N2, getI8Imm(Imm), InFlag };
    unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr :
                                         X86::PCMPESTRIrr;
    InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
                                            array_lengthof(Ops)), 0);

    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::ECX, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
    }
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::EFLAGS, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
    }

    return NULL;
  }
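
  // Editorial note: PCMPESTRI's register contract is implicit on both sides,
  // which is why it is selected by hand here: the explicit string lengths go
  // in through EAX and EDX, and the match index comes back in ECX with the
  // summary bits in EFLAGS, e.g. roughly "pcmpestri $imm, %xmm2, %xmm1".
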
  // FIXME: Custom handling because TableGen doesn't support multiple implicit
  // defs in an instruction pattern.
  case X86ISD::PCMPISTRI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    SDValue N2 = Node->getOperand(2);

    // Make sure the last argument is a constant.
    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N2);
    if (!Cst)
      break;

    uint64_t Imm = Cst->getZExtValue();

    SDValue Ops[] = { N0, N1, getI8Imm(Imm) };
    unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr :
                                         X86::PCMPISTRIrr;
    SDValue InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops,
                                                    array_lengthof(Ops)), 0);

    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::ECX, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
    }
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::EFLAGS, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
    }

    return NULL;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}
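
// Editorial note: the five values pushed above are the standard X86 memory
// operand tuple (Base, Scale, Index, Disp, Segment), so an inline-asm "m"
// constraint such as
//   asm volatile("incl %0" : "+m"(counter));
// ends up carrying a full base + scale*index + disp address.
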
/// createX86ISelDag - This pass converts a legalized DAG into an
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}