//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,
      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,
      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,
      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,
      /// CALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      CALL,
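      // Illustrative example (not from the original header): a simple IR
      // call such as
      //   %r = call i32 @f(i32 %x)
      // would typically become an X86ISD::CALL whose operands are the
      // incoming chain, a TargetGlobalAddress for @f, and copies of %x into
      // the argument registers, with results being the outgoing chain plus
      // a register result for %r.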
      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,
      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 bit-test instructions.
      BT,

      /// X86 SetCC. Operand 0 is the condition code, and operand 1 is the
      /// EFLAGS operand, usually produced by a CMP instruction.
      SETCC,
      // Same as SETCC except it's materialized with an sbb and the value is
      // all ones or all zeros.
      SETCC_CARRY,  // R = carry_bit ? ~0 : 0
      /// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
      /// Operands are two FP values to compare; result is a mask of
      /// 0s or 1s. Generally DTRT for C/C++ with NaNs.
      FSETCCss, FSETCCsd,
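      // Example (illustrative): for the LT predicate,
      //   cmpltss %xmm1, %xmm0
      // leaves all-ones in the low element when %xmm0 < %xmm1 and all-zeros
      // otherwise; an unordered compare against NaN yields all-zeros, which
      // matches C/C++ semantics for '<'.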
      /// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
      /// placing the result in an integer GPR. Needs masking for scalar
      /// result.
      FGETSIGNx86,
      /// X86 conditional moves. Operand 0 and operand 1 are the two values
      /// to select from. Operand 2 is the condition code, and operand 3 is the
      /// flag operand produced by a CMP or TEST instruction. It also writes a
      /// flag result.
      CMOV,

      /// X86 conditional branches. Operand 0 is the chain operand, operand 1
      /// is the block to branch to if the condition is true, operand 2 is the
      /// condition code, and operand 3 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,
      /// Return with a flag operand. Operand 0 is the chain operand, operand
      /// 1 is the number of bytes of stack to pop.
      RET_FLAG,

      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,
      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// MOVQ2DQ - Copies a 64-bit value from an MMX vector to the low word
      /// of an XMM vector, with the high word zero filled.
      MOVQ2DQ,

      /// MOVDQ2Q - Copies a 64-bit value from the low word of an XMM vector
      /// to an MMX vector. If you think this is too close to the previous
      /// mnemonic, so do I; blame Intel.
      MOVDQ2Q,
      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW,

      /// PSHUFB - Shuffle 16 8-bit values within a vector.
      PSHUFB,

      /// ANDNP - Bitwise Logical AND NOT of Packed FP values.
      ANDNP,

      /// PSIGN - Copy integer sign.
      PSIGN,
      /// BLEND family of opcodes
      /// HADD - Integer horizontal add.
      HADD,

      /// HSUB - Integer horizontal sub.
      HSUB,

      /// FHADD - Floating point horizontal add.
      FHADD,

      /// FHSUB - Floating point horizontal sub.
      FHSUB,

      /// FMAX, FMIN - Floating point max and min.
      FMAX, FMIN,

      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,
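      // A common refinement (a sketch, not mandated by this header) is one
      // Newton-Raphson step per iteration:
      //   FRCP:   x1 = x0 * (2.0 - d * x0)             // x0 ~= 1/d
      //   FRSQRT: x1 = x0 * (1.5 - 0.5 * d * x0 * x0)  // x0 ~= 1/sqrt(d)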
      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at
      // the address from an earlier relocation.
      TLSCALL,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,
      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VSHL, VSRL - Vector logical left / right shift.
      VSHL, VSRL,

      // CMPPD, CMPPS - Vector double/float comparison.
      CMPPD, CMPPS,
      // PCMP* - Vector integer comparisons.
      PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
      PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ,
      // ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
      ADD, SUB, ADC, SBB, SMUL,
      INC, DEC, OR, XOR, AND,

      ANDN, // ANDN - Bitwise AND NOT with FLAGS results.

      BLSI,   // BLSI - Extract lowest set isolated bit
      BLSMSK, // BLSMSK - Get mask up to lowest set bit
      BLSR,   // BLSR - Reset lowest set bit
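      // For reference, these BMI operations compute the classic bit tricks:
      // BLSI is x & -x, BLSMSK is x ^ (x - 1), and BLSR is x & (x - 1).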
      UMUL, // LOW, HI, FLAGS = umul LHS, RHS
      // MUL_IMM - X86 specific multiply by immediate.
      MUL_IMM,

      // PTEST - Vector bitwise comparisons.
      PTEST,

      // TESTP - Vector packed fp sign bitwise comparisons.
      TESTP,
      // Several flavors of instructions with vector shuffle behaviors.
      // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
      // according to %al. An operator is needed so that this can be expanded
      // with control flow.
      VASTART_SAVE_XMM_REGS,
      // WIN_ALLOCA - Windows' _chkstk call to do stack probing.
      WIN_ALLOCA,
      // SEG_ALLOCA - For allocating variable amounts of stack space when using
      // segmented stacks. Checks if the current stacklet has enough space, and
      // falls back to heap allocation if not.
      SEG_ALLOCA,
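      // Roughly (pseudo-code sketch, for illustration only):
      //   if (SP - Size < StackLimit)
      //     Result = call the runtime's morestack-style allocator
      //   else
      //     SP -= Size; Result = SP;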
      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
      // Atomic 64-bit binary operations.
      ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMAND64_DAG,
      ATOMXOR64_DAG,
      ATOMNAND64_DAG,
      ATOMSWAP64_DAG,
      // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,
      LCMPXCHG16_DAG,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,
      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,
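      // The expansion looks roughly like this (illustrative only, not
      // emitted verbatim):
      //   fnstcw  (%esp)          ; save the current FP control word
      //   orw     $0xc00, (%esp)  ; select round-toward-zero (truncation)
      //   fldcw   (%esp)          ; install the modified control word
      //   fistpl  (%ecx)          ; store the rounded integer to memory
      //   fldcw   ...             ; restore the original control word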
      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,
      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m instructions. It
      /// takes a chain operand, a pointer to load from, and a ValueType node
      /// indicating the type to load to.
      FLD,
      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m instructions.
      /// It takes a chain operand, a value to store, an address, and a
      /// ValueType to store it as.
      FST,
      /// VAARG_64 - This instruction grabs the address of the next argument
      /// from a va_list. (reads and modifies the va_list in memory)
      VAARG_64,
      // WARNING: Do not add anything at the end unless you want the node to
      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
      // treated as target memory ops!
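      // (Illustrative note: this is why generic code can identify these
      // nodes by checking
      //   N->getOpcode() >= ISD::FIRST_TARGET_MEMORY_OPCODE,
      // which is what SDNode::isTargetMemoryOpcode() tests.)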
    };
  }

  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFD.
    bool isPSHUFDMask(ShuffleVectorSDNode *N);
    /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
    bool isPSHUFHWMask(ShuffleVectorSDNode *N);

    /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
    bool isPSHUFLWMask(ShuffleVectorSDNode *N);
    /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to SHUFP*.
    bool isSHUFPMask(ShuffleVectorSDNode *N);

    /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
    bool isMOVHLPSMask(ShuffleVectorSDNode *N);
    /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
    /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
    /// <2, 3, 2, 3>.
    bool isMOVHLPS_v_undef_Mask(ShuffleVectorSDNode *N);
    /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVLP{S|D}.
    bool isMOVLPMask(ShuffleVectorSDNode *N);
    /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVHP{S|D}
    /// as well as MOVLHPS.
    bool isMOVLHPSMask(ShuffleVectorSDNode *N);
    /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKL.
    bool isUNPCKLMask(ShuffleVectorSDNode *N, bool HasAVX2,
                      bool V2IsSplat = false);

    /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKH.
    bool isUNPCKHMask(ShuffleVectorSDNode *N, bool HasAVX2,
                      bool V2IsSplat = false);
    /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
    /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
    /// <0, 0, 1, 1>.
    bool isUNPCKL_v_undef_Mask(ShuffleVectorSDNode *N, bool HasAVX2);

    /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
    /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
    /// <2, 2, 3, 3>.
    bool isUNPCKH_v_undef_Mask(ShuffleVectorSDNode *N, bool HasAVX2);
    /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSS,
    /// MOVSD, and MOVD, i.e. setting the lowest element.
    bool isMOVLMask(ShuffleVectorSDNode *N);

    /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
    bool isMOVSHDUPMask(ShuffleVectorSDNode *N, const X86Subtarget *Subtarget);

    /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
    bool isMOVSLDUPMask(ShuffleVectorSDNode *N, const X86Subtarget *Subtarget);

    /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
    bool isMOVDDUPMask(ShuffleVectorSDNode *N);

    /// isVEXTRACTF128Index - Return true if the specified
    /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
    /// suitable for input to VEXTRACTF128.
    bool isVEXTRACTF128Index(SDNode *N);

    /// isVINSERTF128Index - Return true if the specified
    /// INSERT_SUBVECTOR operand specifies a subvector insert that is
    /// suitable for input to VINSERTF128.
    bool isVINSERTF128Index(SDNode *N);
    /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
    /// instructions.
    unsigned getShuffleSHUFImmediate(SDNode *N);
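    // For example (illustrative): the v4i32 mask <3, 2, 1, 0> packs into
    // the 8-bit immediate 0x1B (0b00011011), two bits per destination
    // element, with element 0 in the low bits.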
    /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
    unsigned getShufflePSHUFHWImmediate(SDNode *N);

    /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
    /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
    unsigned getShufflePSHUFLWImmediate(SDNode *N);
    /// getExtractVEXTRACTF128Immediate - Return the appropriate
    /// immediate to extract the specified EXTRACT_SUBVECTOR index
    /// with VEXTRACTF128 instructions.
    unsigned getExtractVEXTRACTF128Immediate(SDNode *N);
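    // For example (illustrative): extracting elements <4..7> of a v8f32 is
    // an EXTRACT_SUBVECTOR with start index 4; that index falls in the upper
    // 128-bit lane, so the VEXTRACTF128 immediate is 1.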
    /// getInsertVINSERTF128Immediate - Return the appropriate
    /// immediate to insert at the specified INSERT_SUBVECTOR index
    /// with VINSERTF128 instructions.
    unsigned getInsertVINSERTF128Immediate(SDNode *N);
    /// isZeroNode - Returns true if Elt is a constant zero or a floating point
    /// constant +0.0.
    bool isZeroNode(SDValue Elt);
    /// isOffsetSuitableForCodeModel - Returns true if the given offset can
    /// fit into the displacement field of the instruction.
    bool isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
                                      bool hasSymbolicDisplacement = true);
    /// isCalleePop - Determines whether the callee is required to pop its
    /// own arguments. Callee pop is necessary to support tail calls.
    bool isCalleePop(CallingConv::ID CallingConv,
                     bool is64Bit, bool IsVarArg, bool TailCallOpt);
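    // (For instance, stdcall- and fastcall-style conventions make the callee
    // pop its own arguments, and under -tailcallopt a fastcc callee does as
    // well so that tail calls can clean up the stack.)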
  }

  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
  public:
    explicit X86TargetLowering(X86TargetMachine &TM);
    virtual unsigned getJumpTableEncoding() const;

    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }

    virtual const MCExpr *
    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                              const MachineBasicBlock *MBB, unsigned uid,
                              MCContext &Ctx) const;
    /// getPICJumpTableRelocBase - Returns the relocation base for the given
    /// PIC jumptable.
    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                             SelectionDAG &DAG) const;
    virtual const MCExpr *
    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                 unsigned JTI, MCContext &Ctx) const;
    /// getStackPtrReg - Return the stack pointer register we are using: either
    /// ESP or RSP.
    unsigned getStackPtrReg() const { return X86StackPtr; }
    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the rest
    /// are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(Type *Ty) const;
    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. If DstAlign is zero, the destination alignment can satisfy
    /// any constraint. Similarly, if SrcAlign is zero there is no need to
    /// check it against an alignment requirement, probably because the source
    /// does not need to be loaded. If 'IsZeroVal' is true, it is safe to
    /// return a non-scalar-integer type, e.g. an empty string source, a
    /// constant, or a value loaded from memory. 'MemcpyStrSrc' indicates
    /// whether the memcpy source is constant so it does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    virtual EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsZeroVal, bool MemcpyStrSrc,
                        MachineFunction &MF) const;
    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
      return true;
    }
    /// LowerOperation - Provide custom lowering hooks for some operations.
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
    /// isTypeDesirableForOp - Return true if the target has native support for
    /// the specified value type and it is 'desirable' to use the type for the
    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
    /// instruction encodings are longer and some i16 instructions are slow.
    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;
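    // (Example: a 16-bit 'addw $1, %ax' needs the 0x66 operand-size prefix
    // in 32/64-bit mode, so it encodes one byte longer than the equivalent
    // 32-bit 'addl $1, %eax'.)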
    /// IsDesirableToPromoteOp - This method queries the target whether it is
    /// beneficial for the DAG combiner to promote the specified node. If true,
    /// it should return the desired promotion type by reference in PVT.
    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;
    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;
    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;
    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    virtual EVT getSetCCResultType(EVT VT) const;
    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;
    // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
    // operation that are sign bits.
    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                     unsigned Depth) const;
    virtual bool
    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;

    virtual bool ExpandInlineAsm(CallInst *CI) const;

    virtual ConstraintType
      getConstraintType(const std::string &Constraint) const;
    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

    virtual const char *LowerXConstraint(EVT ConstraintVT) const;
    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              std::string &Constraint,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;
    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
    /// isZExtFree - Return true if any actual instruction that defines a
    /// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
    /// register. This does not necessarily include registers defined in
    /// unknown ways, such as incoming arguments, or copies from unknown
    /// virtual registers. Also, if isTruncateFree(Ty2, Ty1) is true, this
    /// does not necessarily apply to truncate instructions. e.g. on x86-64,
    /// all instructions that define 32-bit values implicitly zero-extend the
    /// result out to 64 bits.
    virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
    virtual bool isZExtFree(EVT VT1, EVT VT2) const;
    /// isNarrowingProfitable - Return true if it's profitable to narrow
    /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
    /// from i32 to i8 but not from i32 to i16.
    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
                                    EVT VT) const;
    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
    /// to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                        EVT VT) const;
    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and/or reduce runtime.
    virtual bool ShouldShrinkFPConstant(EVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constant since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }
    const X86Subtarget* getSubtarget() const {
      return Subtarget;
    }
    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
    /// computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(EVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 needs SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 needs SSE1
    }
    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;
    /// getStackCookieLocation - Return true if the target stores stack
    /// protector cookies at a fixed offset in some non-standard address
    /// space, and populates the address space and offset as appropriate.
    virtual bool getStackCookieLocation(unsigned &AddressSpace,
                                        unsigned &Offset) const;
    SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                      SelectionDAG &DAG) const;
  protected:
    std::pair<const TargetRegisterClass*, uint8_t>
      findRepresentativeClass(EVT VT) const;
  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const X86RegisterInfo *RegInfo;
    const TargetData *TD;
    /// X86StackPtr - X86 physical register used as stack ptr.
    unsigned X86StackPtr;

    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;

    /// LegalFPImmediates - A list of legal fp immediates.
    std::vector<APFloat> LegalFPImmediates;
    /// addLegalFPImmediate - Indicate that this x86 target can instruction
    /// select the specified FP immediate natively.
    void addLegalFPImmediate(const APFloat& Imm) {
      LegalFPImmediates.push_back(Imm);
    }
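    // Illustrative use (a sketch of how the lowering setup code employs
    // this): the constructor can call addLegalFPImmediate(APFloat(+0.0f))
    // so that +0.0, which is materializable with fldz or xorps, is treated
    // as a legal immediate by isFPImmLegal.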
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerMemArgument(SDValue Chain,
                             CallingConv::ID CallConv,
                             const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned i) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    // Call lowering helpers.

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;
    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff, DebugLoc dl) const;

    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
                                         SelectionDAG &DAG) const;

    std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                               bool isSigned) const;
    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
                                   SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                               int64_t Offset, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
                      DebugLoc dl, SelectionDAG &DAG) const;
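    // (Illustrative: LowerToBT recognizes patterns of the form
    //   (and (srl x, n), 1) compared against zero
    // and turns them into a BT instruction plus a SETCC/CMOV on the carry
    // flag, which is usually shorter than the shift-and-mask sequence.)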
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
    // Utility functions to help LowerVECTOR_SHUFFLE.
    SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const;
    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;
    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;
    virtual bool isUsedByReturnOnly(SDNode *N) const;

    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
    virtual EVT
      getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
                               ISD::NodeType ExtendKind) const;
    virtual bool
      CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                     bool isVarArg,
                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                     LLVMContext &Context) const;
    void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG, unsigned NewOp) const;
    /// Utility function to emit string processing SSE4.2 instructions
    /// that return in xmm0.
    /// This takes the instruction to expand, the associated machine basic
    /// block, the number of args, and whether or not the second arg is
    /// in memory.
    MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
                                unsigned argNum, bool inMem) const;
    /// Utility functions to emit monitor and mwait instructions. These
    /// need to make sure that the arguments to the intrinsic are in the
    /// correct registers.
    MachineBasicBlock *EmitMonitor(MachineInstr *MI,
                                   MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const;
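    // (For reference: monitor expects its address in EAX/RAX with hints in
    // ECX and EDX, and mwait expects its hints in EAX and ECX, so these
    // emitters copy the intrinsic operands into those fixed registers.)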
    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine basic
    /// block, and the associated X86 opcodes for reg/reg and reg/imm.
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpc,
                                                    unsigned immOpc,
                                                    unsigned loadOpc,
                                                    unsigned cxchgOpc,
                                                    unsigned notOpc,
                                                    unsigned EAXreg,
                                                    TargetRegisterClass *RC,
                                                    bool invSrc = false) const;
    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                    MachineInstr *BInstr,
                                                    MachineBasicBlock *BB,
                                                    unsigned regOpcL,
                                                    unsigned regOpcH,
                                                    unsigned immOpcL,
                                                    unsigned immOpcH,
                                                    bool invSrc = false) const;
    /// Utility function to emit atomic min and max. It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                          MachineBasicBlock *BB,
                                                        unsigned cmovOpc) const;
    // Utility function to emit the low-level va_arg code for X86-64.
    MachineBasicBlock *EmitVAARG64WithCustomInserter(
                       MachineInstr *MI,
                       MachineBasicBlock *MBB) const;
    /// Utility function to emit the xmm reg save portion of va_start.
    MachineBasicBlock *EmitVAStartSaveXMMRegsWithCustomInserter(
                                                   MachineInstr *BInstr,
                                                   MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
                                         MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitLoweredWinAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB) const;
    MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr *MI,
                                            MachineBasicBlock *BB,
                                            bool Is64Bit) const;
    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;
    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

    /// Emit nodes that will be selected as "cmp Op0,Op1", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                    SelectionDAG &DAG) const;
  };

  namespace X86 {
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}
#endif // X86ISELLOWERING_H