//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"

namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FANDN - Bitwise logical ANDNOT of floating point values. This
      /// corresponds to X86::ANDNPS or X86::ANDNPD.
      FANDN,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of this node are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
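      ///
      /// Purely illustrative sketch of the operand layout described above
      /// (variable names are hypothetical, not taken from the lowering code):
      ///   SDValue Ops[] = { Chain, Callee, BytesPushed, BytesPopped, ALVal };
      ///   Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);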
      CALL,

      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
      /// operand, usually produced by a CMP instruction.
      SETCC,
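
      // A minimal sketch (illustrative only, with a hypothetical SelectionDAG
      // `DAG`, debug location `dl`, and SDValues LHS/RHS) of how such a node
      // pair could be formed for an equality test:
      //   SDValue Flags = DAG.getNode(X86ISD::CMP, dl, MVT::i32, LHS, RHS);
      //   SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
      //                               DAG.getConstant(X86::COND_E, MVT::i8),
      //                               Flags);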

      // Same as SETCC except it's materialized with an sbb and the value is
      // all ones or all zeros.
      SETCC_CARRY, // R = carry_bit ? ~0 : 0

      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s. Generally DTRT for C/C++ with NaNs.
      FSETCC,
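
      // For example (illustrative values): comparing 1.0 < 2.0 with the `lt`
      // predicate yields an all-ones 32-bit mask (0xFFFFFFFF) in the result
      // element, while comparing 2.0 < 1.0 yields 0x00000000.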

      /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
      /// result in an integer GPR. Needs masking for scalar result.
      FGETSIGNx86,

      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch to if the condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,

      /// MMX_MOVD2W - Copies a 32-bit value from the low word of a MMX
      /// vector to a GPR.
      MMX_MOVD2W,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16-bits of a 32-bit value to a vector,
      /// corresponds to X86::PINSRW.
      PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// ANDNP - Bitwise Logical AND NOT of Packed FP values.
      ANDNP,

      /// PSIGN - Copy integer sign.
      PSIGN,

      /// BLENDV - Blend where the selector is a register.
      BLENDV,

      /// BLENDI - Blend where the selector is an immediate.
      BLENDI,

      // SUBUS - Integer sub with unsigned saturation.
      SUBUS,

      /// HADD - Integer horizontal add.
      HADD,

      /// HSUB - Integer horizontal sub.
      HSUB,

      /// FHADD - Floating point horizontal add.
      FHADD,

      /// FHSUB - Floating point horizontal sub.
      FHSUB,

      /// UMAX, UMIN - Unsigned integer max and min.
      UMAX, UMIN,

      /// SMAX, SMIN - Signed integer max and min.
      SMAX, SMIN,

      /// FMAX, FMIN - Floating point max and min.
      FMAX, FMIN,

      /// FMAXC, FMINC - Commutative FMIN and FMAX.
      FMAXC, FMINC,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,
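
      // For reference, one Newton-Raphson step is the usual refinement
      // (standard identities, not code from this file). Given an estimate
      // x0 of 1/d or 1/sqrt(d) respectively:
      //   x1 = x0 * (2.0 - d * x0)
      //   x1 = x0 * (1.5 - 0.5 * d * x0 * x0)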

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSBASEADDR - Thread Local Storage. A call to get the start address
      // of the TLS block for the current module.
      TLSBASEADDR,

      // TLSCALL - Thread Local Storage. When calling to an OS provided
      // thunk at the address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// TC_RETURN - Tail call return. See X86TargetLowering::LowerCall for
      /// the list of operands.
      TC_RETURN,

      // VZEXT_MOVL - Vector move to low scalar and zero higher vector elements.
      VZEXT_MOVL,

      // VZEXT - Vector integer zero-extend.
      VZEXT,

      // VSEXT - Vector integer signed-extend.
      VSEXT,

      // VTRUNC - Vector integer truncate.
      VTRUNC,

      // VTRUNCM - Vector integer truncate with mask.
      VTRUNCM,

      // VFPEXT - Vector FP extend.
      VFPEXT,

      // VFPROUND - Vector FP round.
      VFPROUND,

      // VSHLDQ, VSRLDQ - 128-bit vector logical left / right shift.
      VSHLDQ, VSRLDQ,

      // VSHL, VSRL, VSRA - Vector shift elements.
      VSHL, VSRL, VSRA,

      // VSHLI, VSRLI, VSRAI - Vector shift elements by immediate.
      VSHLI, VSRLI, VSRAI,

      // CMPP - Vector packed double/float comparison.
      CMPP,

      // PCMP* - Vector integer comparisons.
      PCMPEQ, PCMPGT,
      // PCMP*M - Vector integer comparisons, the result is in a mask vector.
      PCMPEQM, PCMPGTM,

      /// CMPM, CMPMU - Vector comparison generating mask bits for fp and
      /// integer signed and unsigned data types.
      CMPM, CMPMU,

      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      BZHI,  // BZHI - Zero high bits.
      BEXTR, // BEXTR - Bit field extract.

      UMUL, // LOW, HI, FLAGS = umul LHS, RHS

      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons.
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons.
      TESTP,

      // TESTM, TESTNM - Vector "test" in AVX-512, the result is in a mask vector.
      TESTM,
      TESTNM,

      // OR/AND test for masks.
      KORTEST,

      // Several flavors of instructions with vector shuffle behaviors.

      // PMULUDQ - Vector multiply packed unsigned doubleword integers.
      PMULUDQ,

      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,

      // WIN_ALLOCA - Windows's _chkstk call to do stack probing.
      WIN_ALLOCA,

      // SEG_ALLOCA - For allocating variable amounts of stack space when using
      // segmented stacks. Checks whether the current stacklet has enough
      // space, and falls back to heap allocation if not.
      SEG_ALLOCA,

      // WIN_FTOL - Windows's _ftol2 runtime routine to do fptoui.
      WIN_FTOL,

      // FNSTSW16r - Store FP status word into i16 register.
      FNSTSW16r,

      // SAHF - Store contents of %ah into %eflags.
      SAHF,

      // RDRAND - Get a random integer and indicate whether it is valid in CF.
      RDRAND,

      // RDSEED - Get a NIST SP800-90B & C compliant random integer and
      // indicate whether it is valid in CF.
      RDSEED,

      // XTEST - Test if in transactional execution.
      XTEST,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMXOR64_DAG,
      ATOMAND64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,
      LCMPXCHG16_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
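      ///
      // Conceptually the emitted sequence is (illustrative summary, not the
      // exact instruction stream):
      //   1. fnstcw: save the current FP control word to a stack slot.
      //   2. Set the rounding-control bits to round-toward-zero and fldcw.
      //   3. fist/fistp: store the value to the integer destination.
      //   4. fldcw: restore the original control word.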
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, ptr to load from, and a ValueType node indicating the type
      /// to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, value to store, address, and a ValueType to store it
      /// as.
      FST,

      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64

      // WARNING: Do not add anything in the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // treated as target memory ops!
    };
  }

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isVEXTRACT128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128, VEXTRACTI128 instructions.
    bool isVEXTRACT128Index(SDNode *N);

    /// isVINSERT128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128, VINSERTI128 instructions.
    bool isVINSERT128Index(SDNode *N);

    /// isVEXTRACT256Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF64X4, VEXTRACTI64X4 instructions.
    bool isVEXTRACT256Index(SDNode *N);

    /// isVINSERT256Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF64X4, VINSERTI64X4 instructions.
    bool isVINSERT256Index(SDNode *N);

    /// getExtractVEXTRACT128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128, VEXTRACTI128 instructions.
    unsigned getExtractVEXTRACT128Immediate(SDNode *N);
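
    // For example (illustrative): extracting a 128-bit subvector starting at
    // element 4 of a v8f32 maps to immediate 1 (the upper 128-bit lane),
    // while element 0 maps to immediate 0.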

    /// getInsertVINSERT128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128, VINSERTI128 instructions.
    unsigned getInsertVINSERT128Immediate(SDNode *N);

    /// getExtractVEXTRACT256Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF64X4, VEXTRACTI64X4 instructions.
    unsigned getExtractVEXTRACT256Immediate(SDNode *N);

    /// getInsertVINSERT256Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF64X4, VINSERTI64X4 instructions.
    unsigned getInsertVINSERT256Immediate(SDNode *N);

    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);

    /// isOffsetSuitableForCodeModel - Returns true if the given offset can
    /// fit into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);
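
    // A minimal usage sketch (illustrative, assuming the small code model):
    //   X86::isOffsetSuitableForCodeModel(1 << 20, CodeModel::Small);   // fits
    //   X86::isOffsetSuitableForCodeModel(1LL << 40, CodeModel::Small); // too big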

    /// isCalleePop - Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool TailCallOpt);
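
    // A minimal usage sketch (illustrative): stdcall on 32-bit x86 pops its
    // own arguments, so the following is expected to be true:
    //   bool Pops = X86::isCalleePop(CallingConv::X86_StdCall,
    //                                /*is64Bit=*/false, /*IsVarArg=*/false,
    //                                /*TailCallOpt=*/false);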
  }

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);

    virtual unsigned getJumpTableEncoding() const;

    virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i8; }

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;

    /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
    /// jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;

    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the rest
    /// are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(Type *Ty) const;

    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly, if SrcAlign is zero there is no need to
    /// check it against the alignment requirement, probably because the source
    /// does not need to be loaded. If 'IsMemset' is true, that means it's
    /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset
    /// of zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant
    /// so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                        MachineFunction &MF) const;

    /// isSafeMemOpType - Returns true if it's safe to use load / store of the
    /// specified type to expand memcpy / memset inline. This is mostly true
    /// for all types except for some special cases. For example, on X86
    /// targets without SSE2, f64 load / store are done with fldl / fstpl,
    /// which also perform a type conversion. Note the specified type doesn't
    /// have to be legal as the hook is used before type legalization.
    virtual bool isSafeMemOpType(MVT VT) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the second argument.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
                                               bool *Fast) const;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;

    /// IsDesirableToPromoteOp - Return true if the target has native support
    /// for the specified value type and it is 'desirable' to use the type.
    /// e.g. On x86 i16 is legal, but undesirable since i16 instruction
    /// encodings are longer and some i16 instructions are slow.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;

    virtual MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr *MI,
                                MachineBasicBlock *MBB) const;

    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;

    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    virtual bool isLegalAddImmediate(int64_t Imm) const;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;

    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;

    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the
    /// result register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;
    virtual bool isZExtFree(SDValue Val, EVT VT2) const;
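
    // Illustrative consequence (assuming x86-64): a 32-bit `movl` already
    // clears bits 63:32 of the destination register, so
    // isZExtFree(MVT::i32, MVT::i64) can return true and an i32->i64 zext of
    // such a value costs no extra instruction.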

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
    /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
    /// expanded to FMAs when this method returns true, otherwise fmuladd is
    /// expanded to fmul + fadd.
    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
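
    // e.g. (illustrative): when this returns true for f32, a call to
    // `llvm.fmuladd.f32(a, b, c)` can be selected as one FMA instruction
    // instead of separate fmul and fadd nodes.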

    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;

    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
    /// to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;

    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }

    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }

    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
    /// computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 requires SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 requires SSE1
    }

    /// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
    /// for fptoui.
    bool isTargetFTOL() const {
      return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
    }

    /// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
    /// used for fptoui to the given type.
    bool isIntegerTypeFTOL(EVT VT) const {
      return isTargetFTOL() && VT == MVT::i64;
    }

    /// \brief Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                   Type *Ty) const;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                                     const TargetLibraryInfo *libInfo) const;

    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as
    /// appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;

    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;

    virtual bool isNoopAddrSpaceCast(unsigned SrcAS,
                                     unsigned DestAS) const LLVM_OVERRIDE;

    /// \brief Reset the operation actions based on target options.
    virtual void resetOperationActions();

  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(MVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const DataLayout *TD;

    /// Used to store the TargetOptions so that we don't waste time resetting
    /// the operation actions unless we have to.
    TargetOptions TO;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal fp immediates.
    std::vector<APFloat> LegalFPImmediates;

    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             SDLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;

    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           Type *RetTy,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;

    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff, SDLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned,
                                               bool isReplace) const;

    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      SDLoc dl, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

    virtual SDValue
    LowerFormalArguments(SDValue Chain,
                         CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         SDLoc dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
    LowerCall(CallLoweringInfo &CLI,
              SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
    LowerReturn(SDValue Chain,
                CallingConv::ID CallConv, bool isVarArg,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                SDLoc dl, SelectionDAG &DAG) const;

    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;

    virtual MVT
    getTypeForExtArgOrReturn(MVT VT, ISD::NodeType ExtendKind) const;

    virtual bool
    CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                   bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const;

    virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const;

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, max, min, umax, umin). It takes the corresponding instruction to
    /// expand, the associated machine basic block, and the associated X86
    /// opcodes for reg/reg.
    MachineBasicBlock *EmitAtomicLoadArith(MachineInstr *MI,
                                           MachineBasicBlock *MBB) const;

    /// Utility function to emit atomic-load-arith operations (and, or, xor,
    /// nand, add, sub, swap) for 64-bit operands on 32-bit targets.
    MachineBasicBlock *EmitAtomicLoadArith6432(MachineInstr *MI,
                                               MachineBasicBlock *MBB) const;

    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;

    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool Is64Bit) const;

    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
                                         MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitFMA3Instr(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;

    /// Convert a comparison if required by the subtarget.
    SDValue ConvertCmpIfNecessary(SDValue Cmp, SelectionDAG &DAG) const;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);
  }
}

#endif // X86ISELLOWERING_H