//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <stdint.h>

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    MCSymbol *MCSym;
    int JT;
    unsigned Align;             // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
        : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
          Segment(), GV(nullptr), CP(nullptr), BlockAddr(nullptr), ES(nullptr),
          MCSym(nullptr), JT(-1), Align(0), SymbolFlags(X86II::MO_NO_FLAG) {}
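
    // For example, the memory operand of "movl 8(%ebx,%ecx,4), %eax" would
    // be represented here as BaseType == RegBase with Base_Reg = EBX,
    // Scale = 4, IndexReg = ECX, Disp = 8 and no segment override.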

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " MCSym ";
      if (MCSym)
        dbgs() << MCSym;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
        : SelectionDAGISel(tm, OptLevel), OptForSize(false) {}

    const char *getPassName() const override {
      return "X86 DAG->DAG Instruction Selection";
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void EmitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N) override;
    SDNode *SelectGather(SDNode *N, unsigned Opc);
    SDNode *SelectAtomicLoadArith(SDNode *Node, MVT NVT);
    SDNode *SelectAndWithSExtImmediate(SDNode *Node, MVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                          SDValue &Scale, SDValue &Index, SDValue &Disp,
                          SDValue &Segment);
    bool SelectMOV64Imm32(SDValue N, SDValue &Imm);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      unsigned ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void EmitSpecialCodeForMain();

    inline void getAddressOperands(X86ISelAddressMode &AM, SDLoc DL,
                                   SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
                 ? CurDAG->getTargetFrameIndex(
                       AM.Base_FrameIndex,
                       TLI->getPointerTy(CurDAG->getDataLayout()))
                 : AM.Base_Reg;
      Scale = getI8Imm(AM.Scale, DL);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "Unexpected symbol flags with MCSym.");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size or not.
    // At a high level, we'd like to avoid such instructions when
    // we have similar constants used within the same basic block
    // that can be kept in a register.
    //
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // Do not want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!OptForSize)
        return false;

      // Walk all the users of the immediate.
      for (SDNode::use_iterator UI = N->use_begin(),
           UE = N->use_end(); (UI != UE) && (UseCount < 2); ++UI) {

        SDNode *User = *UI;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have > 2 operands (except
        // for stores, which are handled above).
        // Those instructions won't match in ISel, for now, and would
        // be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD    ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                   OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than 1 use, then recommend for hoisting.
      return (UseCount > 1);
    }
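
    // Worked example of the heuristic above: when optimizing for size, two
    // stores of the same 32-bit immediate
    //   movl $123456, (%rdi)
    //   movl $123456, (%rsi)
    // take 6 bytes each, while hoisting the constant into a register once
    //   movl $123456, %eax
    //   movl %eax, (%rdi)
    //   movl %eax, (%rsi)
    // takes 5 + 2 + 2 bytes, since the register stores carry no 4-byte
    // immediate field.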

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm, SDLoc DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm, SDLoc DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }

    /// \brief Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }
  };
}

bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::OR:
    case X86ISD::XOR:
    case X86ISD::INC:
    case X86ISD::DEC:
    case ISD::ADD:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case where the increment is 1, then
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
        CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}
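
// When isCalleeLoad succeeds and the load is then folded by instruction
// selection, an indirect call selects as e.g. "callq *(%rbx)" instead of
// "movq (%rbx), %rax" followed by "callq *%rax".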

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->optForSize();

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register indirect
        // calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be store and
    // load to the stack. This is a gross hack. We would like to simply mark
    // these as being illegal, but when we do that, legalize produces these when
    // it expands calls, then expands these in the same legalize pass. We would
    // like dag combine to be able to hack on these between the call expansion
    // and the node legalization. As such this pass basically does "really
    // late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    MVT SrcVT = N->getOperand(0).getSimpleValueType();
    MVT DstVT = N->getSimpleValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    const X86TargetLowering *X86Lowering =
        static_cast<const X86TargetLowering *>(TLI);
    bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore. SSE can fold direct loads into other
    // operations. Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    SDLoc dl(N);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, false, 0);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created. This will cause general havok on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead. Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain() {
  if (Subtarget->isTargetCygMing()) {
    TargetLowering::ArgListTy Args;
    auto &DL = CurDAG->getDataLayout();

    TargetLowering::CallLoweringInfo CLI(*CurDAG);
    CLI.setChain(CurDAG->getRoot())
        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
                   std::move(Args), 0);
    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    CurDAG->setRoot(Result.second);
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain();
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  // Cannot combine ExternalSymbol displacements with integer offsets.
  if (Offset != 0 && (AM.ES || AM.MCSym))
    return true;

  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;
}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == nullptr &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses. We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
      AM.MCSym = S->getMCSymbol();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode. In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
      AM.MCSym = S->getMCSymbol();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == nullptr &&
      AM.IndexReg.getNode() == nullptr &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}
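
// Examples of the two post-processing rules above: "lea (,%rcx,2), %rax"
// becomes "lea (%rcx,%rcx), %rax", and an absolute reference such as
// "movl foo, %eax" becomes the shorter RIP-relative "movl foo(%rip), %eax".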

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
// safe. This allows us to convert the shift and and into an h-register
// extract and a scaled index. Returns false if the simplification is
// performed.
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, DL, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}
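
// Worked example for the transform above, with C1 == 2: the pattern
// (and (srl x, 6), 0x3fc), whose mask is 0xff << 2, becomes
// (shl (and (srl x, 8), 0xff), 2). The inner (x >> 8) & 0xff can be read
// as an h-register (e.g. %ah), and the trailing shl folds into a scale-4
// index.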

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  MVT VT = N.getSimpleValueType();
  SDLoc DL(N);
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}
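
// Worked example for the transform above: (and (shl x, 2), 0xfc) becomes
// (shl (and x, 0x3f), 2), so the new and can serve as the index register
// with a scale of 4.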

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = countLeadingZeros(Mask);
  unsigned MaskTZ = countTrailingZeros(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (countTrailingOnes(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
                          X.getOperand(0).getSimpleValueType().getSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits =
      APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.computeKnownBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  MVT VT = N.getSimpleValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  SDLoc DL(N);
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  SDLoc dl(N);
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it. Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements. It isn't very important, but this should be fixed for
    // consistency.
    if (!(AM.ES || AM.MCSym) && AM.JT != -1)
      return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::LOCAL_RECOVER: {
    if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
      if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
        // Use the symbol and don't prefix it.
        AM.MCSym = ESNode->getMCSymbol();
        return false;
      }
    break;
  }
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now. However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (CurDAG->isBaseWithConstantOffset(ShVal)) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
              cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
          if (!FoldOffsetIntoAddress(Disp, AM))
            return false;
        }

        AM.IndexReg = ShVal;
        return false;
      }
    }
    break;

  case ISD::SRL: {
    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue And = N.getOperand(0);
    if (And.getOpcode() != ISD::AND) break;
    SDValue X = And.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    // The mask used for the transform is expected to be post-shift, but we
    // found the shift first so just apply the shift to the mask before passing
    // it on.
    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
        !isa<ConstantSDNode>(And.getOperand(1)))
      break;
    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);

    // Try to fold the mask and shift into the scale, and return false if we
    // succeed.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
      return false;
    break;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
  case X86ISD::MUL_IMM:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == nullptr &&
        AM.IndexReg.getNode() == nullptr) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now. However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
                cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
            if (FoldOffsetIntoAddress(Disp, AM))
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base_Reg = Reg;
          return false;
        }
    }
    break;
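
  // Worked example for the multiply case above: x*9 sets Scale to 8 and
  // points both Base_Reg and IndexReg at x, so the multiply selects as
  // "leaq (%rdi,%rdi,8), %rax" instead of an imul.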

  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address and
    // the index field with the index field unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction, however
    // it costs an additional mov if the index register has other uses.

    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    // Test if the LHS of the sub can be folded.
    X86ISelAddressMode Backup = AM;
    if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
      AM = Backup;
      break;
    }
    // Test if the index field is free for use.
    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
      AM = Backup;
      break;
    }

    int Cost = 0;
    SDValue RHS = Handle.getValue().getNode()->getOperand(1);
    // If the RHS involves a register with multiple uses, this
    // transformation incurs an extra mov, due to the neg instruction
    // clobbering its operand.
    if (!RHS.getNode()->hasOneUse() ||
        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
         RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
      ++Cost;
    // If the base is a register with multiple uses, this
    // transformation may save a mov.
    if ((AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base_Reg.getNode() &&
         !AM.Base_Reg.getNode()->hasOneUse()) ||
        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
      --Cost;
    // If the folded LHS was interesting, this transformation saves
    // address arithmetic.
    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
        ((AM.Disp != 0) && (Backup.Disp == 0)) +
        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
      --Cost;
    // If it doesn't look like it may be an overall win, don't do it.
    if (Cost >= 0) {
      AM = Backup;
      break;
    }

    // Ok, the transformation is legal and appears profitable. Go for it.
    SDValue Zero = CurDAG->getConstant(0, dl, N.getValueType());
    SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
    AM.IndexReg = Neg;
    AM.Scale = 1;

    // Insert the new nodes into the topological ordering.
    InsertDAGNode(*CurDAG, N, Zero);
    InsertDAGNode(*CurDAG, N, Neg);
    return false;
  }

  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
        !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
        !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
      return false;
    AM = Backup;

    // If we couldn't fold both operands into the address at the same time,
    // see if we can just put each operand into a register and fold at least
    // the add.
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        !AM.Base_Reg.getNode() &&
        !AM.IndexReg.getNode()) {
      N = Handle.getValue();
      AM.Base_Reg = N.getOperand(0);
      AM.IndexReg = N.getOperand(1);
      return false;
    }
    N = Handle.getValue();
    break;
  }

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (CurDAG->isBaseWithConstantOffset(N)) {
      X86ISelAddressMode Backup = AM;
      ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));

      // Start with the LHS as an addr mode.
      if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
          !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
        return false;
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Perform some heroic transforms on an and of a constant-count shift
    // with a constant to enable use of the scaled offset field.

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;

    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
    SDValue X = Shift.getOperand(0);

    // We only handle up to 64-bit values here as those are what matter for
    // addressing mode optimizations.
    if (X.getSimpleValueType().getSizeInBits() > 64) break;

    if (!isa<ConstantSDNode>(N.getOperand(1)))
      break;
    uint64_t Mask = N.getConstantOperandVal(1);

    // Try to fold the mask and shift into an extract and scale.
    if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to fold the mask and shift directly into the scale.
    if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
      return false;

    // Try to swap the mask and shift to place shifts which can be done as
    // a scale on the outside of the mask.
    if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
      return false;
    break;
  }
  }

  return MatchAddressBase(N, AM);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (!AM.IndexReg.getNode()) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}

bool X86DAGToDAGISel::SelectVectorAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                       SDValue &Scale, SDValue &Index,
                                       SDValue &Disp, SDValue &Segment) {

  MaskedGatherScatterSDNode *Mgs = dyn_cast<MaskedGatherScatterSDNode>(Parent);
  if (!Mgs)
    return false;
  X86ISelAddressMode AM;
  unsigned AddrSpace = Mgs->getPointerInfo().getAddrSpace();
  // AddrSpace 256 -> GS, 257 -> FS.
  if (AddrSpace == 256)
    AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
  if (AddrSpace == 257)
    AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);

  SDLoc DL(N);
  Base = Mgs->getBasePtr();
  Index = Mgs->getIndex();
  unsigned ScalarSize = Mgs->getValue().getValueType().getScalarSizeInBits();
  Scale = getI8Imm(ScalarSize/8, DL);

  // If Base is 0, the whole address is in index and the Scale is 1.
  if (isa<ConstantSDNode>(Base)) {
    assert(cast<ConstantSDNode>(Base)->isNullValue() &&
           "Unexpected base in gather/scatter");
    Scale = getI8Imm(1, DL);
    Base = CurDAG->getRegister(0, MVT::i32);
  }
  if (AM.Segment.getNode())
    Segment = AM.Segment;
  else
    Segment = CurDAG->getRegister(0, MVT::i32);
  Disp = CurDAG->getTargetConstant(0, DL, MVT::i32);
  return true;
}

/// SelectAddr - returns true if it is able to pattern match an addressing
/// mode. It returns the operands which make up the maximal addressing mode it
/// can match by reference.
///
/// Parent is the parent node of the addr operand that is being matched. It
/// is always a load, store, atomic node, or null. It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes are all the nodes that have an "addr:$ptr" operand
      // that are not a MemSDNode, and thus don't have proper addrspace info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
        cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    // AddrSpace 256 -> GS, 257 -> FS.
    if (AddrSpace == 256)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == 257)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  }

  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getSimpleValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base_Reg.getNode())
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
///
/// We also return:
///   PatternChainNode: this is the matched node that has a chain input and
///   chain output.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &PatternNodeWithChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    PatternNodeWithChain = N.getOperand(0);
    if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
        PatternNodeWithChain.hasOneUse() &&
        IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
        IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
      if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
        return false;
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements. This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse() &&
      IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
      IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      return false;
    PatternNodeWithChain = SDValue(LD, 0);
    return true;
  }
  return false;
}

bool X86DAGToDAGISel::SelectMOV64Imm32(SDValue N, SDValue &Imm) {
  if (const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    uint64_t ImmVal = CN->getZExtValue();
    if ((uint32_t)ImmVal != (uint64_t)ImmVal)
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), MVT::i64);
    return true;
  }

  // In static codegen with small code model, we can get the address of a label
  // into a register with 'movl'. TableGen has already made sure we're looking
  // at a label of some kind.
  assert(N->getOpcode() == X86ISD::Wrapper &&
         "Unexpected node type for MOV32ri64");
  N = N.getOperand(0);

  if (N->getOpcode() != ISD::TargetConstantPool &&
      N->getOpcode() != ISD::TargetJumpTable &&
      N->getOpcode() != ISD::TargetGlobalAddress &&
      N->getOpcode() != ISD::TargetExternalSymbol &&
      N->getOpcode() != ISD::MCSymbol &&
      N->getOpcode() != ISD::TargetBlockAddress)
    return false;

  return TM.getCodeModel() == CodeModel::Small;
}
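
// For example, the constant 0x11223344 passes the check above and selects
// as "movl $0x11223344, %eax", which zero-extends into RAX; 0x100000000
// does not fit and must be materialized with movabsq instead.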

bool X86DAGToDAGISel::SelectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  if (!SelectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  SDLoc DL(N);
  RegisterSDNode *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
    // Base could already be %rip, particularly in the x32 ABI.
    Base = SDValue(CurDAG->getMachineNode(
                       TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                       CurDAG->getTargetConstant(0, DL, MVT::i64),
                       Base,
                       CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)),
                   0);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    Index = SDValue(CurDAG->getMachineNode(
                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
                        CurDAG->getTargetConstant(0, DL, MVT::i64),
                        Index,
                        CurDAG->getTargetConstant(X86::sub_32bit, DL,
                                                  MVT::i32)),
                    0);
  }

  return true;
}

/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (MatchAddress(N, AM))
    return false;
  assert (T == AM.Segment);
  AM.Segment = Copy;

  MVT VT = N.getSimpleValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base_Reg.getNode())
      Complexity = 1;
    else
      AM.Base_Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}
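
// Complexity example for the cost model above: "x + y + 4" scores a base
// register (1), an index register (+1) and a displacement alongside them
// (+1) for a total of 3, so it is emitted as "leal 4(%eax,%ecx), %edx";
// a plain "x + 4" only scores 2 and is left to the ordinary add patterns.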

/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
  AM.SymbolFlags = GA->getTargetFlags();

  if (N.getValueType() == MVT::i32) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  } else {
    AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
  }

  getAddressOperands(AM, SDLoc(N), Base, Scale, Index, Disp, Segment);
  return true;
}

bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, P) ||
      !IsLegalToFold(N, P, P, OptLevel))
    return false;

  return SelectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  auto &DL = MF->getDataLayout();
  return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
}

/// Atomic opcode table
///
enum AtomicOpc {
  ADD,
  SUB,
  INC,
  DEC,
  OR,
  AND,
  XOR,
  AtomicOpcEnd
};

enum AtomicSz {
  ConstantI8,
  I8,
  SextConstantI16,
  ConstantI16,
  I16,
  SextConstantI32,
  ConstantI32,
  I32,
  SextConstantI64,
  ConstantI64,
  I64,
  AtomicSzEnd
};

static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
  { // ADD
    X86::LOCK_ADD8mi,   X86::LOCK_ADD8mr,
    X86::LOCK_ADD16mi8, X86::LOCK_ADD16mi,   X86::LOCK_ADD16mr,
    X86::LOCK_ADD32mi8, X86::LOCK_ADD32mi,   X86::LOCK_ADD32mr,
    X86::LOCK_ADD64mi8, X86::LOCK_ADD64mi32, X86::LOCK_ADD64mr,
  },
  { // SUB
    X86::LOCK_SUB8mi,   X86::LOCK_SUB8mr,
    X86::LOCK_SUB16mi8, X86::LOCK_SUB16mi,   X86::LOCK_SUB16mr,
    X86::LOCK_SUB32mi8, X86::LOCK_SUB32mi,   X86::LOCK_SUB32mr,
    X86::LOCK_SUB64mi8, X86::LOCK_SUB64mi32, X86::LOCK_SUB64mr,
  },
  { // INC (unary: no immediate forms)
    0, X86::LOCK_INC8m,
    0, 0, X86::LOCK_INC16m,
    0, 0, X86::LOCK_INC32m,
    0, 0, X86::LOCK_INC64m,
  },
  { // DEC (unary: no immediate forms)
    0, X86::LOCK_DEC8m,
    0, 0, X86::LOCK_DEC16m,
    0, 0, X86::LOCK_DEC32m,
    0, 0, X86::LOCK_DEC64m,
  },
  { // OR
    X86::LOCK_OR8mi,   X86::LOCK_OR8mr,
    X86::LOCK_OR16mi8, X86::LOCK_OR16mi,   X86::LOCK_OR16mr,
    X86::LOCK_OR32mi8, X86::LOCK_OR32mi,   X86::LOCK_OR32mr,
    X86::LOCK_OR64mi8, X86::LOCK_OR64mi32, X86::LOCK_OR64mr,
  },
  { // AND
    X86::LOCK_AND8mi,   X86::LOCK_AND8mr,
    X86::LOCK_AND16mi8, X86::LOCK_AND16mi,   X86::LOCK_AND16mr,
    X86::LOCK_AND32mi8, X86::LOCK_AND32mi,   X86::LOCK_AND32mr,
    X86::LOCK_AND64mi8, X86::LOCK_AND64mi32, X86::LOCK_AND64mr,
  },
  { // XOR
    X86::LOCK_XOR8mi,   X86::LOCK_XOR8mr,
    X86::LOCK_XOR16mi8, X86::LOCK_XOR16mi,   X86::LOCK_XOR16mr,
    X86::LOCK_XOR32mi8, X86::LOCK_XOR32mi,   X86::LOCK_XOR32mr,
    X86::LOCK_XOR64mi8, X86::LOCK_XOR64mi32, X86::LOCK_XOR64mr,
  },
};
1846 // Return the target constant operand for atomic-load-op and do simple
1847 // translations, such as from atomic-load-add to lock-sub. The return value is
1848 // one of the following 3 cases:
1849 // + target-constant, the operand could be supported as a target constant.
1850 // + empty, the operand is not needed any more with the new op selected.
1851 // + non-empty, otherwise.
1852 static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
1854 enum AtomicOpc &Op, MVT NVT,
1856 const X86Subtarget *Subtarget) {
1857 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
1858 int64_t CNVal = CN->getSExtValue();
1859 // Quit if not 32-bit imm.
    if ((int32_t)CNVal != CNVal)
      return Val;
    // Quit if INT32_MIN: negating it (it is negative) would overflow,
    // producing an immediate that does not fit in the 32 bits available for
    // an immediate operand to sub. However, it still fits in 32 bits for the
    // add (since it is not negated), so we can return target-constant.
    if (CNVal == INT32_MIN)
      return CurDAG->getTargetConstant(CNVal, dl, NVT);
    // For atomic-load-add, we could do some optimizations.
    if (Op == ADD) {
      // Translate to INC/DEC if ADD by 1 or -1.
      if (((CNVal == 1) || (CNVal == -1)) && !Subtarget->slowIncDec()) {
        Op = (CNVal == 1) ? INC : DEC;
        // No more constant operand after being translated into INC/DEC.
        return SDValue();
      }
      // Translate to SUB if ADD by negative value.
      if (CNVal < 0) {
        Op = SUB;
        CNVal = -CNVal;
      }
    }
    return CurDAG->getTargetConstant(CNVal, dl, NVT);
  }
  // If the value operand has a single use, try to optimize it.
1886 if (Op == ADD && Val.hasOneUse()) {
1887 // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
    if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
      Op = SUB;
      return Val.getOperand(1);
    }
1892 // A special case for i16, which needs truncating as, in most cases, it's
1893 // promoted to i32. We will translate
1894 // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x))
    if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
        Val.getOperand(0).getOpcode() == ISD::SUB &&
        X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
      Op = SUB;
      Val = Val.getOperand(0);
      return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
                                            Val.getOperand(1));
    }
  }

  return Val;
}
1908 SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, MVT NVT) {
  if (Node->hasAnyUseOfValue(0))
    return nullptr;

  SDLoc dl(Node);
1914 // Optimize common patterns for __sync_or_and_fetch and similar arith
1915 // operations where the result is not used. This allows us to use the "lock"
1916 // version of the arithmetic instruction.
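  // For example (sketch): when the result of
  //   __sync_fetch_and_add(p, 1);
  // is discarded, a single "lock incl (%rdi)" suffices, instead of a LOCK
  // XADD or a CMPXCHG loop that would materialize the old value.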
1917 SDValue Chain = Node->getOperand(0);
1918 SDValue Ptr = Node->getOperand(1);
1919 SDValue Val = Node->getOperand(2);
1920 SDValue Base, Scale, Index, Disp, Segment;
  if (!SelectAddr(Node, Ptr, Base, Scale, Index, Disp, Segment))
    return nullptr;
  // Which row of AtomicOpcTbl to use.
  enum AtomicOpc Op;
  switch (Node->getOpcode()) {
  default: llvm_unreachable("atomic opcode expected!");
  case ISD::ATOMIC_LOAD_OR:  Op = OR;  break;
  case ISD::ATOMIC_LOAD_AND: Op = AND; break;
  case ISD::ATOMIC_LOAD_XOR: Op = XOR; break;
  case ISD::ATOMIC_LOAD_ADD: Op = ADD; break;
  }
1943 Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val, Subtarget);
1944 bool isUnOp = !Val.getNode();
1945 bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);
  unsigned Opc = 0;
  switch (NVT.SimpleTy) {
  default: return nullptr;
  case MVT::i8:
    if (isCN) Opc = AtomicOpcTbl[Op][ConstantI8];
    else      Opc = AtomicOpcTbl[Op][I8];
    break;
  case MVT::i16:
    if (isCN)
      Opc = immSext8(Val.getNode()) ? AtomicOpcTbl[Op][SextConstantI16]
                                    : AtomicOpcTbl[Op][ConstantI16];
    else
      Opc = AtomicOpcTbl[Op][I16];
    break;
  case MVT::i32:
    if (isCN)
      Opc = immSext8(Val.getNode()) ? AtomicOpcTbl[Op][SextConstantI32]
                                    : AtomicOpcTbl[Op][ConstantI32];
    else
      Opc = AtomicOpcTbl[Op][I32];
    break;
  case MVT::i64:
    if (isCN) {
      if (immSext8(Val.getNode()))
        Opc = AtomicOpcTbl[Op][SextConstantI64];
      else if (i64immSExt32(Val.getNode()))
        Opc = AtomicOpcTbl[Op][ConstantI64];
      else
        llvm_unreachable("true 64-bit constant in SelectAtomicLoadArith");
    } else
      Opc = AtomicOpcTbl[Op][I64];
    break;
  }
1987 assert(Opc != 0 && "Invalid arith lock transform!");
  // Building the new node.
  SDValue Ret;
  if (isUnOp) {
    SDValue Ops[] = { Base, Scale, Index, Disp, Segment, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
  } else {
    SDValue Ops[] = { Base, Scale, Index, Disp, Segment, Val, Chain };
    Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops), 0);
  }
1999 // Copying the MachineMemOperand.
2000 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
2001 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
2002 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
2004 // We need to have two outputs as that is what the original instruction had.
2005 // So we add a dummy, undefined output. This is safe as we checked first
  // that no one uses our output anyway.
  SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                 dl, NVT), 0);
  SDValue RetVals[] = { Undef, Ret };
  return CurDAG->getMergeValues(RetVals, dl).getNode();
}
2013 /// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
2014 /// any uses which require the SF or OF bits to be accurate.
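/// For example (editor's note): JA/JB/JAE/JBE and SETE/SETNE read only CF
/// and/or ZF, never SF or OF, so such users cannot observe a transform that
/// may change the sign bit; this is what allows the CMP/TEST narrowing below.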
2015 static bool HasNoSignedComparisonUses(SDNode *N) {
2016 // Examine each user of the node.
2017 for (SDNode::use_iterator UI = N->use_begin(),
2018 UE = N->use_end(); UI != UE; ++UI) {
2019 // Only examine CopyToReg uses.
    if (UI->getOpcode() != ISD::CopyToReg)
      continue;
2022 // Only examine CopyToReg uses that copy to EFLAGS.
    if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
        X86::EFLAGS)
      continue;
2026 // Examine each user of the CopyToReg use.
2027 for (SDNode::use_iterator FlagUI = UI->use_begin(),
2028 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
2029 // Only examine the Flag result.
2030 if (FlagUI.getUse().getResNo() != 1) continue;
2031 // Anything unusual: assume conservatively.
2032 if (!FlagUI->isMachineOpcode()) return false;
2033 // Examine the opcode of the user.
2034 switch (FlagUI->getMachineOpcode()) {
2035 // These comparisons don't treat the most significant bit specially.
2036 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
2037 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
2038 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
2039 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
2040 case X86::JA_1: case X86::JAE_1: case X86::JB_1: case X86::JBE_1:
2041 case X86::JE_1: case X86::JNE_1: case X86::JP_1: case X86::JNP_1:
2042 case X86::CMOVA16rr: case X86::CMOVA16rm:
2043 case X86::CMOVA32rr: case X86::CMOVA32rm:
2044 case X86::CMOVA64rr: case X86::CMOVA64rm:
2045 case X86::CMOVAE16rr: case X86::CMOVAE16rm:
2046 case X86::CMOVAE32rr: case X86::CMOVAE32rm:
2047 case X86::CMOVAE64rr: case X86::CMOVAE64rm:
2048 case X86::CMOVB16rr: case X86::CMOVB16rm:
2049 case X86::CMOVB32rr: case X86::CMOVB32rm:
2050 case X86::CMOVB64rr: case X86::CMOVB64rm:
2051 case X86::CMOVBE16rr: case X86::CMOVBE16rm:
2052 case X86::CMOVBE32rr: case X86::CMOVBE32rm:
2053 case X86::CMOVBE64rr: case X86::CMOVBE64rm:
2054 case X86::CMOVE16rr: case X86::CMOVE16rm:
2055 case X86::CMOVE32rr: case X86::CMOVE32rm:
2056 case X86::CMOVE64rr: case X86::CMOVE64rm:
2057 case X86::CMOVNE16rr: case X86::CMOVNE16rm:
2058 case X86::CMOVNE32rr: case X86::CMOVNE32rm:
2059 case X86::CMOVNE64rr: case X86::CMOVNE64rm:
2060 case X86::CMOVNP16rr: case X86::CMOVNP16rm:
2061 case X86::CMOVNP32rr: case X86::CMOVNP32rm:
2062 case X86::CMOVNP64rr: case X86::CMOVNP64rm:
2063 case X86::CMOVP16rr: case X86::CMOVP16rm:
2064 case X86::CMOVP32rr: case X86::CMOVP32rm:
      case X86::CMOVP64rr: case X86::CMOVP64rm:
        continue;
      // Anything else: assume conservatively.
      default: return false;
      }
    }
  }
  return true;
}
/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
/// is suitable for doing the {load; increment or decrement; store} to modify
/// the value in memory in place.
2078 static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
2079 SDValue StoredVal, SelectionDAG *CurDAG,
2080 LoadSDNode* &LoadNode, SDValue &InputChain) {
  // Is the value stored the result of a DEC or INC?
  if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;

  // Is the stored value result 0 of the load?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the loaded value than the inc or dec?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;
2095 SDValue Load = StoredVal->getOperand(0);
2096 // Is the stored value a non-extending and non-indexed load?
2097 if (!ISD::isNormalLoad(Load.getNode())) return false;
2099 // Return LoadNode by reference.
2100 LoadNode = cast<LoadSDNode>(Load);
  // Is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
  EVT LdVT = LoadNode->getMemoryVT();
  if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
      LdVT != MVT::i8)
    return false;

  // Is the store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;
2116 // Check if the chain is produced by the load or is a TokenFactor with
2117 // the load output chain as an operand. Return InputChain by reference.
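  // Accepted chain shapes (editor's sketch):
  //   StoreNode.chain == LoadNode.value(1), or
  //   StoreNode.chain == TokenFactor(..., LoadNode.value(1), ...)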
2118 SDValue Chain = StoreNode->getChain();
  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
2125 SmallVector<SDValue, 4> ChainOps;
2126 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
2127 SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        continue;
      }
2133 // Make sure using Op as part of the chain would not cause a cycle here.
2134 // In theory, we could check whether the chain node is a predecessor of
2135 // the load. But that can be very expensive. Instead visit the uses and
2136 // make sure they all have smaller node id than the load.
2137 int LoadId = LoadNode->getNodeId();
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        if (UI.getUse().getResNo() != 0)
          continue;
        if (UI->getNodeId() > LoadId)
          return false;
      }
      ChainOps.push_back(Op);
    }

    if (ChainCheck)
      // Make a new TokenFactor with all the other input chains except
      // the load.
      InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
                                   MVT::Other, ChainOps);
  }

  if (!ChainCheck)
    return false;

  return true;
}
/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in-memory
2162 /// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
2163 static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
2164 if (Opc == X86ISD::DEC) {
2165 if (LdVT == MVT::i64) return X86::DEC64m;
2166 if (LdVT == MVT::i32) return X86::DEC32m;
2167 if (LdVT == MVT::i16) return X86::DEC16m;
    if (LdVT == MVT::i8)  return X86::DEC8m;
  } else {
2170 assert(Opc == X86ISD::INC && "unrecognized opcode");
2171 if (LdVT == MVT::i64) return X86::INC64m;
2172 if (LdVT == MVT::i32) return X86::INC32m;
2173 if (LdVT == MVT::i16) return X86::INC16m;
    if (LdVT == MVT::i8)  return X86::INC8m;
  }
  llvm_unreachable("unrecognized size for LdVT");
}
2179 /// SelectGather - Customized ISel for GATHER operations.
2181 SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
2182 // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
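  // For example (editor's sketch), when Opc is X86::VGATHERDPSrm the selected
  // instruction is
  //   vgatherdps %xmm2, (%rax,%xmm1,4), %xmm0
  // where %xmm1 is VIdx, %xmm2 is VMask (written back as elements complete),
  // and 4 is Scale.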
2183 SDValue Chain = Node->getOperand(0);
2184 SDValue VSrc = Node->getOperand(2);
2185 SDValue Base = Node->getOperand(3);
2186 SDValue VIdx = Node->getOperand(4);
2187 SDValue VMask = Node->getOperand(5);
  ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
  if (!Scale)
    return nullptr;

  SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
                                   MVT::Other);

  SDLoc DL(Node);
2197 // Memory Operands: Base, Scale, Index, Disp, Segment
2198 SDValue Disp = CurDAG->getTargetConstant(0, DL, MVT::i32);
2199 SDValue Segment = CurDAG->getRegister(0, MVT::i32);
2200 const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue(), DL), VIdx,
2201 Disp, Segment, VMask, Chain};
2202 SDNode *ResNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
2203 // Node has 2 outputs: VDst and MVT::Other.
2204 // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
  // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
  // of ResNode.
2207 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
  ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
  return ResNode;
}
2212 // Try to shrink the encoding of an AND by setting additional bits in the mask.
2213 // It is only correct to do so if we know a priori that the other operand of the
2214 // AND already has those bits set to zero.
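// Worked example (editor's sketch): for "andl $0xF0, %eax", the immediate
// 0xF0 needs a 32-bit encoding, but if bits 31..8 of the other operand are
// known zero the mask can be widened to 0xFFFFFFF0 (-16), which fits in a
// sign-extended 8-bit immediate (AND32ri8) without changing the result.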
2215 SDNode *X86DAGToDAGISel::SelectAndWithSExtImmediate(SDNode *Node, MVT NVT) {
2216 SDValue N0 = Node->getOperand(0);
2217 SDValue N1 = Node->getOperand(1);
  if (NVT != MVT::i32 && NVT != MVT::i64)
    return nullptr;

  auto *Cst = dyn_cast<ConstantSDNode>(N1);
  if (!Cst)
    return nullptr;
2226 // As a heuristic, skip over negative constants. It turns out not to be
2227 // productive to widen the mask.
  int64_t Val = Cst->getSExtValue();
  if (Val < 0)
    return nullptr;

  // Limit ourselves to constants which already have sign bits to save on
  // compile time.
  if ((int8_t)Val >= 0)
    return nullptr;
  unsigned Opc;
  switch (NVT.SimpleTy) {
  default:
    llvm_unreachable("Unsupported VT!");
  case MVT::i32:
    Opc = X86::AND32ri8;
    break;
  case MVT::i64:
    Opc = X86::AND64ri8;
    break;
  }
  APInt Op0Zero, Op0One;
  CurDAG->computeKnownBits(N0, Op0Zero, Op0One);
  // Grow the mask using the known zero bits.
  Op0Zero |= Val;
  // See if the grown mask can still be encoded as a sign-extended 8-bit
  // immediate.
  if (!Op0Zero.isSignedIntN(8))
    return nullptr;

  SDLoc DL(Node);
  SDValue NewCst =
      CurDAG->getTargetConstant(Op0Zero.getSExtValue(), DL, MVT::i8);
  return CurDAG->getMachineNode(Opc, DL, NVT, N0, NewCst);
}
2263 SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
  MVT NVT = Node->getSimpleValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();
  SDLoc dl(Node);
2269 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
2271 if (Node->isMachineOpcode()) {
2272 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
2273 Node->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default: break;
2284 case Intrinsic::x86_avx2_gather_d_pd:
2285 case Intrinsic::x86_avx2_gather_d_pd_256:
2286 case Intrinsic::x86_avx2_gather_q_pd:
2287 case Intrinsic::x86_avx2_gather_q_pd_256:
2288 case Intrinsic::x86_avx2_gather_d_ps:
2289 case Intrinsic::x86_avx2_gather_d_ps_256:
2290 case Intrinsic::x86_avx2_gather_q_ps:
2291 case Intrinsic::x86_avx2_gather_q_ps_256:
2292 case Intrinsic::x86_avx2_gather_d_q:
2293 case Intrinsic::x86_avx2_gather_d_q_256:
2294 case Intrinsic::x86_avx2_gather_q_q:
2295 case Intrinsic::x86_avx2_gather_q_q_256:
2296 case Intrinsic::x86_avx2_gather_d_d:
2297 case Intrinsic::x86_avx2_gather_d_d_256:
2298 case Intrinsic::x86_avx2_gather_q_d:
2299 case Intrinsic::x86_avx2_gather_q_d_256: {
      if (!Subtarget->hasAVX2())
        break;

      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
2305 case Intrinsic::x86_avx2_gather_d_pd: Opc = X86::VGATHERDPDrm; break;
2306 case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
2307 case Intrinsic::x86_avx2_gather_q_pd: Opc = X86::VGATHERQPDrm; break;
2308 case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
2309 case Intrinsic::x86_avx2_gather_d_ps: Opc = X86::VGATHERDPSrm; break;
2310 case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
2311 case Intrinsic::x86_avx2_gather_q_ps: Opc = X86::VGATHERQPSrm; break;
2312 case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
2313 case Intrinsic::x86_avx2_gather_d_q: Opc = X86::VPGATHERDQrm; break;
2314 case Intrinsic::x86_avx2_gather_d_q_256: Opc = X86::VPGATHERDQYrm; break;
2315 case Intrinsic::x86_avx2_gather_q_q: Opc = X86::VPGATHERQQrm; break;
2316 case Intrinsic::x86_avx2_gather_q_q_256: Opc = X86::VPGATHERQQYrm; break;
2317 case Intrinsic::x86_avx2_gather_d_d: Opc = X86::VPGATHERDDrm; break;
2318 case Intrinsic::x86_avx2_gather_d_d_256: Opc = X86::VPGATHERDDYrm; break;
2319 case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break;
2320 case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break;
      SDNode *RetVal = SelectGather(Node, Opc);
      if (RetVal)
        // We already called ReplaceUses inside SelectGather.
        return nullptr;
      break;
    }
    }
    break;
  }
2331 case X86ISD::GlobalBaseReg:
2332 return getGlobalBaseReg();
2334 case X86ISD::SHRUNKBLEND: {
2335 // SHRUNKBLEND selects like a regular VSELECT.
2336 SDValue VSelect = CurDAG->getNode(
2337 ISD::VSELECT, SDLoc(Node), Node->getValueType(0), Node->getOperand(0),
2338 Node->getOperand(1), Node->getOperand(2));
2339 ReplaceUses(SDValue(Node, 0), VSelect);
    SelectCode(VSelect.getNode());
    // We already called ReplaceUses.
    return nullptr;
  }
2345 case ISD::ATOMIC_LOAD_XOR:
2346 case ISD::ATOMIC_LOAD_AND:
2347 case ISD::ATOMIC_LOAD_OR:
2348 case ISD::ATOMIC_LOAD_ADD: {
    SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
    if (RetVal)
      return RetVal;
    break;
  }
  case ISD::AND:
    if (SDNode *NewNode = SelectAndWithSExtImmediate(Node, NVT)) {
      ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
      return nullptr;
    }
    // FALLTHROUGH
  case ISD::OR:
  case ISD::XOR: {
2363 // For operations of the form (x << C1) op C2, check if we can use a smaller
2364 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
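    // Worked example (editor's sketch):
    //   (x << 8) ^ 0x1000  ->  (x ^ 0x10) << 8
    // 0x1000 requires a 32-bit immediate while 0x10 fits in a sign-extended
    // 8-bit one, and no set bits are lost since 0x1000 & 0xFF == 0.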
2365 SDValue N0 = Node->getOperand(0);
2366 SDValue N1 = Node->getOperand(1);
    if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
      break;
2371 // i8 is unshrinkable, i16 should be promoted to i32.
    if (NVT != MVT::i32 && NVT != MVT::i64)
      break;
2375 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
2376 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!Cst || !ShlCst)
      break;
2380 int64_t Val = Cst->getSExtValue();
2381 uint64_t ShlVal = ShlCst->getZExtValue();
2383 // Make sure that we don't change the operation by removing bits.
    // This only matters for OR and XOR; AND is unaffected.
2385 uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
    if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
      break;

    unsigned ShlOp, AddOp, Op;
    MVT CstVT = NVT;
2392 // Check the minimum bitwidth for the new constant.
2393 // TODO: AND32ri is the same as AND64ri32 with zext imm.
2394 // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
2395 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
    if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
      CstVT = MVT::i8;
    else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
      CstVT = MVT::i32;

    // Bail if there is no smaller encoding.
    if (NVT == CstVT)
      break;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      assert(CstVT == MVT::i8);
      ShlOp = X86::SHL32ri;
      AddOp = X86::ADD32rr;
      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = X86::AND32ri8; break;
      case ISD::OR:  Op = X86::OR32ri8;  break;
      case ISD::XOR: Op = X86::XOR32ri8; break;
      }
      break;
    case MVT::i64:
      assert(CstVT == MVT::i8 || CstVT == MVT::i32);
      ShlOp = X86::SHL64ri;
      AddOp = X86::ADD64rr;
      switch (Opcode) {
      default: llvm_unreachable("Impossible opcode");
      case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
      case ISD::OR:  Op = CstVT==MVT::i8? X86::OR64ri8  : X86::OR64ri32;  break;
      case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
      }
      break;
    }
2433 // Emit the smaller op and the shift.
2434 SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, dl, CstVT);
2435 SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
    if (ShlVal == 1)
      return CurDAG->SelectNodeTo(Node, AddOp, NVT, SDValue(New, 0),
                                  SDValue(New, 0));
    return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
                                getI8Imm(ShlVal, dl));
  }
  case X86ISD::UMUL8:
  case X86ISD::SMUL8: {
2444 SDValue N0 = Node->getOperand(0);
2445 SDValue N1 = Node->getOperand(1);
2447 Opc = (Opcode == X86ISD::SMUL8 ? X86::IMUL8r : X86::MUL8r);
2449 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::AL,
2450 N0, SDValue()).getValue(1);
2452 SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32);
2453 SDValue Ops[] = {N1, InFlag};
2454 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2456 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    return nullptr;
  }
2461 case X86ISD::UMUL: {
2462 SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned LoReg;
2466 switch (NVT.SimpleTy) {
2467 default: llvm_unreachable("Unsupported VT!");
2468 case MVT::i8: LoReg = X86::AL; Opc = X86::MUL8r; break;
2469 case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break;
2470 case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
    case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
    }
2474 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
2475 N0, SDValue()).getValue(1);
2477 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
2478 SDValue Ops[] = {N1, InFlag};
2479 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2481 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
2482 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
    return nullptr;
  }
2487 case ISD::SMUL_LOHI:
2488 case ISD::UMUL_LOHI: {
2489 SDValue N0 = Node->getOperand(0);
2490 SDValue N1 = Node->getOperand(1);
2492 bool isSigned = Opcode == ISD::SMUL_LOHI;
    bool hasBMI2 = Subtarget->hasBMI2();
    if (!isSigned) {
      switch (NVT.SimpleTy) {
2496 default: llvm_unreachable("Unsupported VT!");
2497 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
2498 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
2499 case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
2500 MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
2501 case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
                     MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
2506 default: llvm_unreachable("Unsupported VT!");
2507 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
2508 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
2509 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }
    }

    unsigned SrcReg, LoReg, HiReg;
    switch (Opc) {
    default: llvm_unreachable("Unknown MUL opcode!");
    case X86::MUL8r:  case X86::IMUL8r:
      SrcReg = LoReg = X86::AL;  HiReg = X86::AH;  break;
    case X86::MUL16r: case X86::IMUL16r:
      SrcReg = LoReg = X86::AX;  HiReg = X86::DX;  break;
    case X86::MUL32r: case X86::IMUL32r:
      SrcReg = LoReg = X86::EAX; HiReg = X86::EDX; break;
    case X86::MUL64r: case X86::IMUL64r:
      SrcReg = LoReg = X86::RAX; HiReg = X86::RDX; break;
    case X86::MULX32rr:
      SrcReg = X86::EDX; LoReg = HiReg = 0; break;
    case X86::MULX64rr:
      SrcReg = X86::RDX; LoReg = HiReg = 0; break;
    }
2541 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2542 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }
2550 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
2551 N0, SDValue()).getValue(1);
    SDValue ResHi, ResLo;

    if (foldedLoad) {
      SDValue Chain;
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
2558 if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
2559 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
2560 SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2561 ResHi = SDValue(CNode, 0);
2562 ResLo = SDValue(CNode, 1);
2563 Chain = SDValue(CNode, 2);
2564 InFlag = SDValue(CNode, 3);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
2567 SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
2568 Chain = SDValue(CNode, 0);
        InFlag = SDValue(CNode, 1);
      }

      // Update the chain.
      ReplaceUses(N1.getValue(1), Chain);
    } else {
      SDValue Ops[] = { N1, InFlag };
2576 if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
2577 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
2578 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
2579 ResHi = SDValue(CNode, 0);
2580 ResLo = SDValue(CNode, 1);
2581 InFlag = SDValue(CNode, 2);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Glue);
2584 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        InFlag = SDValue(CNode, 0);
      }
    }
2589 // Prevent use of AH in a REX instruction by referencing AX instead.
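    // (Editor's note: AH, BH, CH and DH cannot be encoded in an instruction
    // carrying a REX prefix; REX reuses their encodings for SPL, BPL, SIL and
    // DIL. Copying AX out and shifting avoids ever naming AH on x86-64.)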
2590 if (HiReg == X86::AH && Subtarget->is64Bit() &&
2591 !SDValue(Node, 1).use_empty()) {
2592 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2593 X86::AX, MVT::i16, InFlag);
2594 InFlag = Result.getValue(2);
      // Get the low part if needed. Don't use getCopyFromReg for aliasing
      // registers.
      if (!SDValue(Node, 0).use_empty())
        ReplaceUses(SDValue(Node, 0),
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2601 // Shift AX down 8 bits.
      // Shift AX down 8 bits.
      Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
                                              Result,
                                              CurDAG->getTargetConstant(8, dl,
                                                                        MVT::i8)),
                       0);
      // Then truncate it down to i8.
      ReplaceUses(SDValue(Node, 1),
        CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
    }
2610 // Copy the low half of the result, if it is needed.
2611 if (!SDValue(Node, 0).use_empty()) {
2612 if (!ResLo.getNode()) {
2613 assert(LoReg && "Register for low half is not defined!");
        ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
                                       InFlag);
        InFlag = ResLo.getValue(2);
      }
      ReplaceUses(SDValue(Node, 0), ResLo);
      DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
    }
2621 // Copy the high half of the result, if it is needed.
2622 if (!SDValue(Node, 1).use_empty()) {
2623 if (!ResHi.getNode()) {
2624 assert(HiReg && "Register for high half is not defined!");
        ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
                                       InFlag);
        InFlag = ResHi.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), ResHi);
      DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
    }

    return nullptr;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM:
  case X86ISD::SDIVREM8_SEXT_HREG:
2639 case X86ISD::UDIVREM8_ZEXT_HREG: {
2640 SDValue N0 = Node->getOperand(0);
2641 SDValue N1 = Node->getOperand(1);
2643 bool isSigned = (Opcode == ISD::SDIVREM ||
                     Opcode == X86ISD::SDIVREM8_SEXT_HREG);
    if (!isSigned) {
      switch (NVT.SimpleTy) {
2647 default: llvm_unreachable("Unsupported VT!");
2648 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
2649 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
2650 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
2655 default: llvm_unreachable("Unsupported VT!");
2656 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
2657 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
2658 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
2664 unsigned SExtOpcode;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      SExtOpcode = X86::CBW;
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX; ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      SExtOpcode = X86::CQO;
      break;
    }
2686 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
2687 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
2688 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
2692 // Special case for div8, just use a move with zero extension to AX to
2693 // clear the upper 8 bits (AH).
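      // Rough shape of the emitted code (editor's sketch, unsigned i8 case):
      //   movzbl %src, %eax     ; zeroes AH along with the rest of EAX
      //   divb   %divisor       ; AL = quotient, AH = remainder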
2694 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
2695 if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
                                         MVT::Other, Ops), 0);
        Chain = Move.getValue(1);
        ReplaceUses(N0.getValue(1), Chain);
      } else {
        Move =
          SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
        Chain = CurDAG->getEntryNode();
      }
2707 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
      InFlag = Chain.getValue(1);
    } else {
      InFlag =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
2713 if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InFlag =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
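        // e.g. for a 32-bit unsigned divide (editor's sketch):
        //   xorl %edx, %edx     ; clear the high half of the dividend
        //   divl %ecx           ; EDX:EAX / ECX -> EAX (quot), EDX (rem)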
        SDValue ClrNode = SDValue(CurDAG->getMachineNode(X86::MOV32r0, dl, NVT), 0);
        switch (NVT.SimpleTy) {
        case MVT::i16:
          ClrNode = SDValue(
              CurDAG->getMachineNode(
                  TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                  CurDAG->getTargetConstant(X86::sub_16bit, dl, MVT::i32)),
              0);
          break;
        case MVT::i32: break;
        case MVT::i64:
          ClrNode = SDValue(
              CurDAG->getMachineNode(
                  TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                  CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
                  CurDAG->getTargetConstant(X86::sub_32bit, dl, MVT::i32)),
              0);
          break;
        default: llvm_unreachable("Unexpected division source");
        }

        InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InFlag).getValue(1);
      }
    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
    }
2762 // Prevent use of AH in a REX instruction by explicitly copying it to
2763 // an ABCD_L register.
2765 // The current assumption of the register allocator is that isel
2766 // won't generate explicit references to the GR8_ABCD_H registers. If
2767 // the allocator and/or the backend get enhanced to be more robust in
2768 // that regard, this can be, and should be, removed.
2769 if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
2770 SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
2771 unsigned AHExtOpcode =
2772 isSigned ? X86::MOVSX32_NOREXrr8 : X86::MOVZX32_NOREXrr8;
2774 SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
2775 MVT::Glue, AHCopy, InFlag);
2776 SDValue Result(RNode, 0);
2777 InFlag = SDValue(RNode, 1);
2779 if (Opcode == X86ISD::UDIVREM8_ZEXT_HREG ||
2780 Opcode == X86ISD::SDIVREM8_SEXT_HREG) {
2781 if (Node->getValueType(1) == MVT::i64) {
2782 // It's not possible to directly movsx AH to a 64bit register, because
2783 // the latter needs the REX prefix, but the former can't have it.
2784 assert(Opcode != X86ISD::SDIVREM8_SEXT_HREG &&
2785 "Unexpected i64 sext of h-register");
          Result = SDValue(
              CurDAG->getMachineNode(
                  TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                  CurDAG->getTargetConstant(0, dl, MVT::i64), Result,
                  CurDAG->getTargetConstant(X86::sub_32bit, dl, MVT::i32)),
              0);
        }
      } else {
        Result =
            CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);
      }
      ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
2801 // Copy the division (low) result, if it is needed.
2802 if (!SDValue(Node, 0).use_empty()) {
2803 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2804 LoReg, NVT, InFlag);
2805 InFlag = Result.getValue(2);
2806 ReplaceUses(SDValue(Node, 0), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
2809 // Copy the remainder (high) result, if it is needed.
2810 if (!SDValue(Node, 1).use_empty()) {
2811 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2812 HiReg, NVT, InFlag);
2813 InFlag = Result.getValue(2);
2814 ReplaceUses(SDValue(Node, 1), Result);
      DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
    }
    return nullptr;
  }

  case X86ISD::CMP:
  case X86ISD::SUB: {
    // Sometimes a SUB is used to perform a comparison.
2823 if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
      // This node is not a CMP.
      break;
2826 SDValue N0 = Node->getOperand(0);
2827 SDValue N1 = Node->getOperand(1);
    // Look past the truncate if CMP is the only use of it.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
        HasNoSignedComparisonUses(Node))
      N0 = N0.getOperand(0);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
2836 if ((N0.getNode()->getOpcode() == ISD::AND ||
2837 (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
2838 N0.getNode()->hasOneUse() &&
2839 N0.getValueType() != MVT::i8 &&
2840 X86::isZeroNode(N1)) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
      if (!C) break;
2844 // For example, convert "testl %eax, $8" to "testb %al, $8"
2845 if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
2846 (!(C->getZExtValue() & 0x80) ||
2847 HasNoSignedComparisonUses(Node))) {
2848 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl, MVT::i8);
2849 SDValue Reg = N0.getNode()->getOperand(0);
2851 // On x86-32, only the ABCD registers have 8-bit subregisters.
2852 if (!Subtarget->is64Bit()) {
2853 const TargetRegisterClass *TRC;
2854 switch (N0.getSimpleValueType().SimpleTy) {
2855 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2856 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
2859 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
2860 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
                                               Reg.getValueType(), Reg, RC), 0);
        }
2864 // Extract the l-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                        MVT::i8, Reg);

        // Emit a testb.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
                                                 Subreg, Imm);
2871 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2872 // one, do not call ReplaceAllUsesWith.
2873 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }
2878 // For example, "testl %eax, $2048" to "testb %ah, $8".
2879 if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
2880 (!(C->getZExtValue() & 0x8000) ||
2881 HasNoSignedComparisonUses(Node))) {
2882 // Shift the immediate right by 8 bits.
        SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
                                                       dl, MVT::i8);
2885 SDValue Reg = N0.getNode()->getOperand(0);
2887 // Put the value in an ABCD register.
2888 const TargetRegisterClass *TRC;
2889 switch (N0.getSimpleValueType().SimpleTy) {
2890 case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
2891 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2892 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
          default: llvm_unreachable("Unsupported TEST operand type!");
          }
2895 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
2896 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2897 Reg.getValueType(), Reg, RC), 0);
2899 // Extract the h-register.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                        MVT::i8, Reg);
2903 // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
        // target GR8_NOREX registers, so make sure the register class is
        // constrained properly.
2906 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
2907 MVT::i32, Subreg, ShiftedImm);
2908 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2909 // one, do not call ReplaceAllUsesWith.
2910 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }
2915 // For example, "testl %eax, $32776" to "testw %ax, $32776".
2916 if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
2917 N0.getValueType() != MVT::i16 &&
2918 (!(C->getZExtValue() & 0x8000) ||
2919 HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl,
                                                MVT::i16);
2922 SDValue Reg = N0.getNode()->getOperand(0);
2924 // Extract the 16-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                        MVT::i16, Reg);

        // Emit a testw.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
                                                 Subreg, Imm);
2931 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2932 // one, do not call ReplaceAllUsesWith.
2933 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }
2938 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
2939 if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
2940 N0.getValueType() == MVT::i64 &&
2941 (!(C->getZExtValue() & 0x80000000) ||
2942 HasNoSignedComparisonUses(Node))) {
        SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), dl,
                                                MVT::i32);
2945 SDValue Reg = N0.getNode()->getOperand(0);
2947 // Extract the 32-bit subregister.
        SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                        MVT::i32, Reg);

        // Emit a testl.
        SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
                                                 Subreg, Imm);
2954 // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has
2955 // one, do not call ReplaceAllUsesWith.
2956 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
                    SDValue(NewNode, 0));
        return nullptr;
      }
    }
    break;
  }
  case ISD::STORE: {
2964 // Change a chain of {load; incr or dec; store} of the same value into
2965 // a simple increment or decrement through memory of that value, if the
2966 // uses of the modified value and its address are suitable.
2967 // The DEC64m tablegen pattern is currently not able to match the case where
2968 // the EFLAGS on the original DEC are used. (This also applies to
2969 // {INC,DEC}X{64,32,16,8}.)
    // We'll need to improve tablegen to allow flags to be transferred from a
    // node in the pattern to the result node, probably with a new keyword.
    // For example, we have this:
2973 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2974 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2975 // (implicit EFLAGS)]>;
    // but we may need something like this:
2977 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2978 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2979 // (transferrable EFLAGS)]>;
2981 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
2982 SDValue StoredVal = StoreNode->getOperand(1);
2983 unsigned Opc = StoredVal->getOpcode();
    LoadSDNode *LoadNode = nullptr;
    SDValue InputChain;
    if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
                             LoadNode, InputChain))
      break;
2991 SDValue Base, Scale, Index, Disp, Segment;
2992 if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
                    Base, Scale, Index, Disp, Segment))
      break;
2996 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
2997 MemOp[0] = StoreNode->getMemOperand();
2998 MemOp[1] = LoadNode->getMemOperand();
2999 const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
3000 EVT LdVT = LoadNode->getMemoryVT();
3001 unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
    MachineSDNode *Result = CurDAG->getMachineNode(newOpc, SDLoc(Node),
                                                   MVT::i32, MVT::Other, Ops);
3005 Result->setMemRefs(MemOp, MemOp + 2);
3007 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
    ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));

    return Result;
  }
  }
3014 SDNode *ResNode = SelectCode(Node);
  DEBUG(dbgs() << "=> ";
        if (ResNode == nullptr || ResNode == Node)
          dbgs() << "nothing";
        else
          ResNode->dump(CurDAG);
        dbgs() << '\n');

  return ResNode;
}
3026 bool X86DAGToDAGISel::
3027 SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
3028 std::vector<SDValue> &OutOps) {
3029 SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
3033 case InlineAsm::Constraint_i:
3034 // FIXME: It seems strange that 'i' is needed here since it's supposed to
3035 // be an immediate and not a memory constraint.
3037 case InlineAsm::Constraint_o: // offsetable ??
3038 case InlineAsm::Constraint_v: // not offsetable ??
3039 case InlineAsm::Constraint_m: // memory
3040 case InlineAsm::Constraint_X:
    if (!SelectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }
3046 OutOps.push_back(Op0);
3047 OutOps.push_back(Op1);
3048 OutOps.push_back(Op2);
3049 OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}
3054 /// createX86ISelDag - This pass converts a legalized DAG into a
3055 /// X86-specific DAG, ready for instruction scheduling.
3057 FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
3058 CodeGenOpt::Level OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}