//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum { RegBase, FrameIndexBase } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
106 dbgs() << "Base_Reg ";
107 if (Base_Reg.getNode() != 0)
108 Base_Reg.getNode()->dump();
111 dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
112 << " Scale" << Scale << '\n'
114 if (IndexReg.getNode() != 0)
115 IndexReg.getNode()->dump();
118 dbgs() << " Disp " << Disp << '\n'
135 dbgs() << " JT" << JT << " Align" << Align << '\n';
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

    // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT);
    SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      else if (AM.BlockAddr)
        Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32,
                                       true, AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD: case X86ISD::SUB: case X86ISD::AND: case X86ISD::XOR:
    case X86ISD::OR:  case ISD::ADD:    case ISD::ADDC:   case ISD::ADDE:
    case ISD::AND:    case ISD::OR:     case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is a 8-bit immediate we should fold the immediate
      // instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      //   movl    %gs:0, %eax
      //   leal    i@NTPOFF(%eax), %eax
      // instead of
      //   movl    $i@NTPOFF, %eax
      //   addl    %gs:0, %eax
      // If the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size());
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        (N->getOpcode() == X86ISD::CALL ||
         N->getOpcode() == X86ISD::TC_RETURN)) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack into a store
    // and a load to the stack.  This is a gross hack.  We would like to simply
    // mark these as being illegal, but when we do that, legalize produces these
    // when it expands calls, then expands these in the same legalize pass.  We
    // would like dag combine to be able to hack on these between the call
    // expansion and the node legalization.  As such this pass basically does
    // "really late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havok on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
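
// Illustrative example of the lowering above (not from the original source):
// an FP_ROUND from f64 on the x87 stack to f32 becomes a 4-byte truncating
// store to a stack temporary followed by an extending load, so the rounding
// is performed through memory.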

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::WINCALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field.  Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}
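
// Illustrative example (not from the original source): a combined
// displacement of 1 << 30 still passes the isInt<31> check and leaves
// headroom for the frame offset, whereas a value needing bit 31 could
// overflow the signed 32-bit displacement field once frame layout adds
// its own offset.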

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetELF())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode.  These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses.  We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel) &&
      // Base and index reg must be 0 in order to use %rip as base and lowering
      // must allow RIP.
      !AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -static -mcmodel=small mode.  In 64-bit
  // mode, this results in a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      ((M == CodeModel::Small || M == CodeModel::Kernel) &&
       TM.getRelocationModel() == Reloc::Static)) {
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else {
      AM.BlockAddr = cast<BlockAddressSDNode>(N0)->getBlockAddress();
      AM.SymbolFlags = cast<BlockAddressSDNode>(N0)->getTargetFlags();
    }
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
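
// Illustrative example of the post-processing above (not from the original
// source): "leaq 0(,%rax,2), %rcx" is rewritten as "leaq (%rax,%rax), %rcx",
// and a matched bare symbolic address "foo" is emitted as foo(%rip) under the
// 64-bit small code model for the shorter encoding.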

// Transform "(X >> (8-C1)) & C2" to "(X >> 8) & 0xff" if safe. This
// allows us to convert the shift and and into an h-register extract and
// a scaled index. Returns false if the simplification is performed.
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering.
  if (Eight.getNode()->getNodeId() == -1 ||
      Eight.getNode()->getNodeId() > X.getNode()->getNodeId()) {
    DAG.RepositionNode(X.getNode(), Eight.getNode());
    Eight.getNode()->setNodeId(X.getNode()->getNodeId());
  }
  if (NewMask.getNode()->getNodeId() == -1 ||
      NewMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
    DAG.RepositionNode(X.getNode(), NewMask.getNode());
    NewMask.getNode()->setNodeId(X.getNode()->getNodeId());
  }
  if (Srl.getNode()->getNodeId() == -1 ||
      Srl.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
    DAG.RepositionNode(Shift.getNode(), Srl.getNode());
    Srl.getNode()->setNodeId(Shift.getNode()->getNodeId());
  }
  if (And.getNode()->getNodeId() == -1 ||
      And.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), And.getNode());
    And.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (ShlCount.getNode()->getNodeId() == -1 ||
      ShlCount.getNode()->getNodeId() > X.getNode()->getNodeId()) {
    DAG.RepositionNode(X.getNode(), ShlCount.getNode());
    ShlCount.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (Shl.getNode()->getNodeId() == -1 ||
      Shl.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), Shl.getNode());
    Shl.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
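
// Illustrative example of the transform above (not from the original source):
// with a shift amount of 5 on an i32 value, ScaleLog is 3 and the mask must be
// 0xff << 3 = 0x7f8, so (and (srl X, 5), 0x7f8) becomes
// (shl (and (srl X, 8), 0xff), 3); the srl/and pair then matches an h-register
// extract and the shl is absorbed into the addressing mode as Scale = 8.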

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   andl $0x1f, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    X86ISelAddressMode &AM) {
  // Scale must not be used already.
  if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) return true;

  SDValue Shift = N;
  SDValue And = N.getOperand(0);
  if (N.getOpcode() != ISD::SRL)
    std::swap(Shift, And);
  if (Shift.getOpcode() != ISD::SRL || And.getOpcode() != ISD::AND ||
      !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !isa<ConstantSDNode>(And.getOperand(1)))
    return true;
  SDValue X = (N == Shift ? And.getOperand(0) : Shift.getOperand(0));

  // We only handle up to 64-bit values here as those are what matter for
  // addressing mode optimizations.
  if (X.getValueSizeInBits() > 64) return true;

  uint64_t Mask = And.getConstantOperandVal(1);
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = CountLeadingZeros_64(Mask);
  unsigned MaskTZ = CountTrailingZeros_64(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask. If the mask is pre-shift, we subtract
  // the shift amount.
  int AMShiftAmt = MaskTZ - (N == Shift ? ShiftAmt : 0);

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift if it was applied
  // before the mask.
  MaskLZ -= (64 - X.getValueSizeInBits()) + (N == Shift ? 0 : ShiftAmt);

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, MaskedHighBits, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    if (NewX.getNode()->getNodeId() == -1 ||
        NewX.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      DAG.RepositionNode(N.getNode(), NewX.getNode());
      NewX.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    X = NewX;
  }
  DebugLoc DL = N.getDebugLoc();
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);
  if (NewSRLAmt.getNode()->getNodeId() == -1 ||
      NewSRLAmt.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewSRLAmt.getNode());
    NewSRLAmt.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (NewSRL.getNode()->getNodeId() == -1 ||
      NewSRL.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewSRL.getNode());
    NewSRL.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (NewSHLAmt.getNode()->getNodeId() == -1 ||
      NewSHLAmt.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewSHLAmt.getNode());
    NewSHLAmt.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  if (NewSHL.getNode()->getNodeId() == -1 ||
      NewSHL.getNode()->getNodeId() > N.getNode()->getNodeId()) {
    DAG.RepositionNode(N.getNode(), NewSHL.getNode());
    NewSHL.getNode()->setNodeId(N.getNode()->getNodeId());
  }
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it.  Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL:
    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, AM))
      return false;
    break;

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
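    // For example (illustrative): x*9 is selected with base = index = x and
    // Scale = 8, i.e. "lea (%rax,%rax,8), %rcx" when x lives in %rax.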
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field is unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address.  Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction; however,
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    if (Zero.getNode()->getNodeId() == -1 ||
        Zero.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Zero.getNode());
      Zero.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    if (Neg.getNode()->getNodeId() == -1 ||
        Neg.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), Neg.getNode());
      Neg.getNode()->setNodeId(N.getNode()->getNodeId());
    }
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      AM.Scale = 1;
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
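    // For example (illustrative): in (or (shl %x, 4), 5) the low four bits of
    // the shl are known zero, so the 5 can go straight into the displacement.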
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    SDValue Shift = N.getOperand(0);
    if (Shift.getNumOperands() != 2) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    SDValue X = Shift.getOperand(0);
    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, C2->getZExtValue(),
                                   Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, AM))
      return false;

    // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
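    // For example (illustrative): (and (shl %x, 2), 0x3fc) becomes
    // (shl (and %x, 0xff), 2), and the shl is then absorbed as Scale = 4.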
    if (Shift.getOpcode() != ISD::SHL) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, dl, N.getValueType(), X,
                                     NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
                                       NewAND, SDValue(C1, 0));

    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

/// SelectAddr - Returns true if it is able to pattern match an addressing
/// mode.  It returns the operands which make up the maximal addressing mode
/// it can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched.  It
/// is always a load, store, atomic node, or null.  It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes is all the nodes that have an "addr:$ptr" operand
      // but are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  EVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
/// match a load whose top elements are either undef or zeros.  The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternNodeWithChain: this is the matched node that has a chain input and
///   chain output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements.  This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load.  Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}

/// SelectLEAAddr - It calls SelectAddr and determines if the maximal
/// addressing mode it matches can be cost-effectively emitted as an LEA
/// instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  EVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  return true;
}

bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return NULL;
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
  SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
                                           MVT::i32, MVT::i32, MVT::Other, Ops,
                                           array_lengthof(Ops));
  cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
  return ResNode;
}

// FIXME: Figure out some way to unify this with the 'or' and other code
// below.
SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_add_and_fetch and
  // __sync_sub_and_fetch where the result is not used. This allows us
  // to use the "lock" version of add, sub, inc, dec instructions.
  // FIXME: Do not use special instructions but instead add the "lock"
  // prefix to the target node somehow. The extra information will then be
  // transferred to machine instruction and it denotes the prefix.
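  // For example (illustrative): __sync_fetch_and_add(p, 1) with an unused
  // result selects to "lock incl (%reg)" rather than a cmpxchg loop.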
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  bool isInc = false, isDec = false, isSub = false, isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) {
    isCN = true;
    int64_t CNVal = CN->getSExtValue();
    if (CNVal == 1)
      isInc = true;
    else if (CNVal == -1)
      isDec = true;
    else if (CNVal >= 0)
      Val = CurDAG->getTargetConstant(CNVal, NVT);
    else {
      isSub = true;
      Val = CurDAG->getTargetConstant(-CNVal, NVT);
    }
  } else if (Val.hasOneUse() &&
             Val.getOpcode() == ISD::SUB &&
             X86::isZeroNode(Val.getOperand(0))) {
    isSub = true;
    Val = Val.getOperand(1);
  }

  DebugLoc dl = Node->getDebugLoc();
  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isInc)
      Opc = X86::LOCK_INC8m;
    else if (isDec)
      Opc = X86::LOCK_DEC8m;
    else if (isSub) {
      if (isCN)
        Opc = X86::LOCK_SUB8mi;
      else
        Opc = X86::LOCK_SUB8mr;
    } else {
      if (isCN)
        Opc = X86::LOCK_ADD8mi;
      else
        Opc = X86::LOCK_ADD8mr;
    }
    break;
  case MVT::i16:
    if (isInc)
      Opc = X86::LOCK_INC16m;
    else if (isDec)
      Opc = X86::LOCK_DEC16m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB16mi8;
        else
          Opc = X86::LOCK_SUB16mi;
      } else
        Opc = X86::LOCK_SUB16mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD16mi8;
        else
          Opc = X86::LOCK_ADD16mi;
      } else
        Opc = X86::LOCK_ADD16mr;
    }
    break;
  case MVT::i32:
    if (isInc)
      Opc = X86::LOCK_INC32m;
    else if (isDec)
      Opc = X86::LOCK_DEC32m;
    else if (isSub) {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB32mi8;
        else
          Opc = X86::LOCK_SUB32mi;
      } else
        Opc = X86::LOCK_SUB32mr;
    } else {
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD32mi8;
        else
          Opc = X86::LOCK_ADD32mi;
      } else
        Opc = X86::LOCK_ADD32mr;
    }
    break;
  case MVT::i64:
    if (isInc)
      Opc = X86::LOCK_INC64m;
    else if (isDec)
      Opc = X86::LOCK_DEC64m;
    else if (isSub) {
      Opc = X86::LOCK_SUB64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_SUB64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_SUB64mi32;
      }
    } else {
      Opc = X86::LOCK_ADD64mr;
      if (isCN) {
        if (immSext8(Val.getNode()))
          Opc = X86::LOCK_ADD64mi8;
        else if (i64immSExt32(Val.getNode()))
          Opc = X86::LOCK_ADD64mi32;
      }
    }
    break;
  }

  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  if (isInc || isDec) {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  } else {
    SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
    SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
    cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
    SDValue RetVals[] = { Undef, Ret };
    return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
  }
}

enum AtomicOpc {
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

static const unsigned int AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  {
    X86::LOCK_OR8mi,    X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8,  X86::LOCK_OR16mi,   X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8,  X86::LOCK_OR32mi,   X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8,  X86::LOCK_OR64mi32, X86::LOCK_OR64mr
  },
  {
    X86::LOCK_AND8mi,    X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8,  X86::LOCK_AND16mi,   X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8,  X86::LOCK_AND32mi,   X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8,  X86::LOCK_AND64mi32, X86::LOCK_AND64mr
  },
  {
    X86::LOCK_XOR8mi,    X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8,  X86::LOCK_XOR16mi,   X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8,  X86::LOCK_XOR32mi,   X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8,  X86::LOCK_XOR64mi32, X86::LOCK_XOR64mr
  }
};

SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return 0;

  // Optimize common patterns for __sync_or_and_fetch and similar arith
  // operations where the result is not used. This allows us to use the "lock"
  // version of the arithmetic instruction.
  // FIXME: Same as for 'add' and 'sub', try to merge those down here.
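  // For example (illustrative): __sync_fetch_and_or(p, 16) with an unused
  // result becomes a single "lock orl $16, (%reg)" instead of a cmpxchg loop.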
  SDValue Chain = Node->getOperand(0);
  SDValue Ptr = Node->getOperand(1);
  SDValue Val = Node->getOperand(2);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
    return 0;

  // Which index into the table.
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
    case ISD::ATOMIC_LOAD_OR:
      Op = OR;
      break;
    case ISD::ATOMIC_LOAD_AND:
      Op = AND;
      break;
    case ISD::ATOMIC_LOAD_XOR:
      Op = XOR;
      break;
    default:
      return 0;
  }

  bool isCN = false;
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val);
  if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) {
    isCN = true;
    Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT);
  }

  unsigned Opc = 0;
  switch (NVT.getSimpleVT().SimpleTy) {
  default: return 0;
  case MVT::i8:
    if (isCN)
      Opc = AtomicOpcTbl[Op][ConstantI8];
    else
      Opc = AtomicOpcTbl[Op][I8];
    break;
  case MVT::i16:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI16];
      else
        Opc = AtomicOpcTbl[Op][ConstantI16];
    } else
      Opc = AtomicOpcTbl[Op][I16];
    break;
  case MVT::i32:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI32];
      else
        Opc = AtomicOpcTbl[Op][ConstantI32];
    } else
      Opc = AtomicOpcTbl[Op][I32];
    break;
  case MVT::i64:
    Opc = AtomicOpcTbl[Op][I64];
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI64];
      else if (i64immSExt32(Val.getNode()))
        Opc = AtomicOpcTbl[Op][ConstantI64];
    }
    break;
  }

  assert(Opc != 0 && "Invalid arith lock transform!");

  DebugLoc dl = Node->getDebugLoc();
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
  MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
  SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
  SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0);
  cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}

/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
/// any uses which require the SF or OF bits to be accurate.
static bool HasNoSignedComparisonUses(SDNode *N) {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = N->use_begin(),
         UE = N->use_end(); UI != UE; ++UI) {
    // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      return false;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the opcode of the user.
      switch (FlagUI->getMachineOpcode()) {
      // These comparisons don't treat the most significant bit specially.
      case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
      case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
      case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
      case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
      case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
      case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
      case X86::CMOVA16rr: case X86::CMOVA16rm:
      case X86::CMOVA32rr: case X86::CMOVA32rm:
      case X86::CMOVA64rr: case X86::CMOVA64rm:
      case X86::CMOVAE16rr: case X86::CMOVAE16rm:
      case X86::CMOVAE32rr: case X86::CMOVAE32rm:
      case X86::CMOVAE64rr: case X86::CMOVAE64rm:
      case X86::CMOVB16rr: case X86::CMOVB16rm:
      case X86::CMOVB32rr: case X86::CMOVB32rm:
      case X86::CMOVB64rr: case X86::CMOVB64rm:
      case X86::CMOVBE16rr: case X86::CMOVBE16rm:
      case X86::CMOVBE32rr: case X86::CMOVBE32rm:
      case X86::CMOVBE64rr: case X86::CMOVBE64rm:
      case X86::CMOVE16rr: case X86::CMOVE16rm:
      case X86::CMOVE32rr: case X86::CMOVE32rm:
      case X86::CMOVE64rr: case X86::CMOVE64rm:
      case X86::CMOVNE16rr: case X86::CMOVNE16rm:
      case X86::CMOVNE32rr: case X86::CMOVNE32rm:
      case X86::CMOVNE64rr: case X86::CMOVNE64rm:
      case X86::CMOVNP16rr: case X86::CMOVNP16rm:
      case X86::CMOVNP32rr: case X86::CMOVNP32rm:
      case X86::CMOVNP64rr: case X86::CMOVNP64rm:
      case X86::CMOVP16rr: case X86::CMOVP16rm:
      case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}

SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  EVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  DebugLoc dl = Node->getDebugLoc();

  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    // For operations of the form (x << C1) op C2, check if we can use a smaller
    // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
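    // For example, "(or (shl x, 8), 0x4200)" can instead be emitted as
    // "(shl (or x, 0x42), 8)": 0x4200 needs a 32-bit immediate, while 0x42
    // fits the imm8 form (e.g. OR32ri8). Illustrative values; the checks
    // below decide when the rewrite is legal and profitable.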
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;

    // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;

    ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
    ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;

    int64_t Val = Cst->getSExtValue();
    uint64_t ShlVal = ShlCst->getZExtValue();
    // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR, AND is unaffected.
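    // For example, in "(or (shl x, 8), 0x1ff)" the low bit of 0x1ff would be
    // shifted away by the rewrite ((0x1ff >> 8) << 8 == 0x100 != 0x1ff), so
    // it must be rejected here. An AND's low immediate bits only mask bits
    // the shl already zeroed, so AND is exempt. (Illustrative values.)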
    if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val)
      break;
    unsigned ShlOp, Op = 0;
    EVT CstVT = NVT;

    // Check the minimum bitwidth for the new constant.
    // TODO: AND32ri is the same as AND64ri32 with zext imm.
    // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
    // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;

      switch (Opcode) {
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op =  X86::OR32ri8; break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;

      switch (Opcode) {
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8?  X86::OR64ri8 :  X86::OR64ri32; break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }

    // Emit the smaller op and the shift.
    SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
    SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0), NewCst);
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal));
  }
  case X86ISD::UMUL: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  Opc = X86::MUL8r;  break;
    case MVT::i16: LoReg = X86::AX;  Opc = X86::MUL16r; break;
    case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
    SDValue Ops[] = {N1, InFlag};
    SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return NULL;
  }
  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }
    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }
    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);

      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag);
      InFlag = SDValue(CNode, 0);
    }
    // Prevent use of AH in a REX instruction by referencing AX instead.
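    // (An instruction carrying a REX prefix cannot encode AH/BH/CH/DH, so in
    // 64-bit mode the high byte is recovered by copying all of AX and then
    // shifting, as sketched below.)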
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                               CurDAG->getTargetConstant(8, MVT::i8)), 0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  Opc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }
    unsigned LoReg, HiReg, ClrReg;
    unsigned ClrOpcode, SExtOpcode;
    switch (NVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      ClrOpcode  = 0;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrOpcode  = X86::MOV16r0; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      ClrOpcode  = X86::MOV32r0;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      ClrOpcode  = X86::MOV64r0;
      SExtOpcode = X86::CQO;
      break;
    }
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);
    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
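      // For example, "udiv i8" becomes roughly "movzbl %src, %eax; divb %op":
      // the zero-extending move clears AH, so the quotient lands in AL and
      // the remainder in AH with no separate sign/zero-extend instruction.
      // (Illustrative lowering.)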
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops,
                                         array_lengthof(Ops)), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
        Chain = CurDAG->getEntryNode();
      }
      Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDValue ClrNode =
          SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    }
    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
                               array_lengthof(Ops));
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }
    // Prevent use of AH in a REX instruction by referencing AX instead.
    // Shift it down 8 bits.
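    // (Same REX-prefix restriction as in the multiply case above: AH is not
    // encodable in an instruction that has a REX prefix, so read AX and
    // shift right by 8 instead.)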
    if (HiReg == X86::AH && Subtarget->is64Bit() &&
        !SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              X86::AX, MVT::i16, InFlag);
      InFlag = Result.getValue(2);

      // If we also need AL (the quotient), get it by extracting a subreg from
      // Result. The fast register allocator does not like multiple CopyFromReg
      // nodes using aliasing registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));

      // Shift AX right by 8 bits instead of using AH.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                               CurDAG->getTargetConstant(8, MVT::i8)),
                       0);
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return NULL;
  }
  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);
    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
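    // The cases below try, in order: testb against the low byte, testb
    // against the high byte (immediate shifted right by 8), testw against
    // the low word, and testl against the low dword of an i64 operand.
    // (Overview of the code that follows.)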
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      // Look past the truncate if CMP is the only use of it.
      N0 = N0.getOperand(0);
    if ((N0.getNode()->getOpcode() == ISD::AND ||
         (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
        N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8 &&
        X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;
      // For example, convert "testl %eax, $8" to "testb %al, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
          (!(C->getZExtValue() & 0x80) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // On x86-32, only the ABCD registers have 8-bit subregisters.
        if (!Subtarget->is64Bit()) {
          TargetRegisterClass *TRC = 0;
          switch (N0.getValueType().getSimpleVT().SimpleTy) {
          case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
          case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
          SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
          Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }

        // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm);
      }
      // For example, "testl %eax, $2048" to "testb %ah, $8".
      if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       MVT::i8);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Put the value in an ABCD register.
        TargetRegisterClass *TRC = 0;
        switch (N0.getValueType().getSimpleVT().SimpleTy) {
        case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
        case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
        case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
        default: llvm_unreachable("Unsupported TEST operand type!");
        }
        SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
        Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                             Reg.getValueType(), Reg, RC), 0);

        // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);

        // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // constrained to match.
        return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32,
                                      Subreg, ShiftedImm);
      }
      // For example, "testl %eax, $32776" to "testw %ax, $32776".
      if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
          N0.getValueType() != MVT::i16 &&
          (!(C->getZExtValue() & 0x8000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm);
      }
      // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
      if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
          N0.getValueType() == MVT::i64 &&
          (!(C->getZExtValue() & 0x80000000) ||
           HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
        SDValue Reg = N0.getNode()->getOperand(0);

        // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm);
      }
    }
    break;
  }
  case ISD::STORE: {
    // The DEC64m tablegen pattern is currently not able to match the case
    // where the EFLAGS on the original DEC are used.
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node, probably with a new keyword.
    // For example, we have this:
    // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //   (implicit EFLAGS)]>;
    // but maybe need something like this:
    // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
    //  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
    //   (transferrable EFLAGS)]>;
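    //
    // In DAG terms, the hand-matching below rewrites
    //   (store (X86ISD::DEC (load addr)), addr)
    // into a single DEC64m machine node whose EFLAGS result replaces the
    // uses of the original DEC's flags. (Summary of the code that follows.)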
    StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
    SDValue Chain = StoreNode->getOperand(0);
    SDValue StoredVal = StoreNode->getOperand(1);
    SDValue Address = StoreNode->getOperand(2);
    SDValue Undef = StoreNode->getOperand(3);

    if (StoreNode->getMemOperand()->getSize() != 8 ||
        Undef->getOpcode() != ISD::UNDEF ||
        Chain->getOpcode() != ISD::LOAD ||
        StoredVal->getOpcode() != X86ISD::DEC ||
        StoredVal.getResNo() != 0 ||
        StoredVal->getOperand(0).getNode() != Chain.getNode())
      break;

    //OPC_CheckPredicate, 1, // Predicate_nontemporalstore
    if (StoreNode->isNonTemporal())
      break;

    LoadSDNode *LoadNode = cast<LoadSDNode>(Chain.getNode());
    if (LoadNode->getOperand(1) != Address ||
        LoadNode->getOperand(2) != Undef)
      break;

    if (!ISD::isNormalLoad(LoadNode))
      break;

    if (!ISD::isNormalStore(StoreNode))
      break;

    // Check that the load's chain has only one use (the store).
    if (!Chain.hasOneUse())
      break;

    // Merge the input chains if they are not intra-pattern references.
    SDValue InputChain = LoadNode->getOperand(0);

    SDValue Base, Scale, Index, Disp, Segment;
    if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;
    MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
    MemOp[0] = StoreNode->getMemOperand();
    MemOp[1] = LoadNode->getMemOperand();
    const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
    MachineSDNode *Result = CurDAG->getMachineNode(X86::DEC64m,
                                                   Node->getDebugLoc(),
                                                   MVT::i32, MVT::Other, Ops,
                                                   array_lengthof(Ops));
    Result->setMemRefs(MemOp, MemOp + 2);

    ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
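///
/// Typical use, as a sketch (the X86 target normally adds this pass from its
/// instruction-selection hook on the pass manager):
///   PM.add(createX86ISelDag(TM, CodeGenOpt::Default));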
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     llvm::CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}