//===-- X86ISelLowering.h - X86 DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H

#include "X86Subtarget.h"
#include "X86RegisterInfo.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
namespace llvm {
  namespace X86ISD {
    // X86 Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// BSF - Bit scan forward.
      /// BSR - Bit scan reverse.
      BSF,
      BSR,

      /// SHLD, SHRD - Double shift instructions. These correspond to
      /// X86::SHLDxx and X86::SHRDxx instructions.
      SHLD,
      SHRD,

      /// FAND - Bitwise logical AND of floating point values. This corresponds
      /// to X86::ANDPS or X86::ANDPD.
      FAND,

      /// FOR - Bitwise logical OR of floating point values. This corresponds
      /// to X86::ORPS or X86::ORPD.
      FOR,

      /// FXOR - Bitwise logical XOR of floating point values. This corresponds
      /// to X86::XORPS or X86::XORPD.
      FXOR,

      /// FSRL - Bitwise logical right shift of floating point values. This
      /// corresponds to X86::PSRLDQ.
      FSRL,

      /// FILD, FILD_FLAG - This instruction implements SINT_TO_FP with the
      /// integer source in memory and FP reg result. This corresponds to the
      /// X86::FILD*m instructions. It has three inputs (token chain, address,
      /// and source type) and two outputs (FP value and token chain). FILD_FLAG
      /// also produces a flag.
      FILD,
      FILD_FLAG,
      /// FP_TO_INT*_IN_MEM - This instruction implements FP_TO_SINT with the
      /// integer destination in memory and a FP reg source. This corresponds
      /// to the X86::FIST*m instructions and the rounding mode change stuff. It
      /// has two inputs (token chain and address) and two outputs (int value
      /// and token chain).
      FP_TO_INT16_IN_MEM,
      FP_TO_INT32_IN_MEM,
      FP_TO_INT64_IN_MEM,

      /// FLD - This instruction implements an extending load to FP stack slots.
      /// This corresponds to the X86::FLD32m / X86::FLD64m. It takes a chain
      /// operand, a ptr to load from, and a ValueType node indicating the type
      /// to load to.
      FLD,

      /// FST - This instruction implements a truncating store to FP stack
      /// slots. This corresponds to the X86::FST32m / X86::FST64m. It takes a
      /// chain operand, a value to store, an address, and a ValueType to store
      /// it as.
      FST,
      /// CALL/TAILCALL - These operations represent an abstract X86 call
      /// instruction, which includes a bunch of information. In particular the
      /// operands of these nodes are:
      ///
      ///     #0 - The incoming token chain
      ///     #1 - The callee
      ///     #2 - The number of arg bytes the caller pushes on the stack.
      ///     #3 - The number of arg bytes the callee pops off the stack.
      ///     #4 - The value to pass in AL/AX/EAX (optional)
      ///     #5 - The value to pass in DL/DX/EDX (optional)
      ///
      /// The result values of these nodes are:
      ///
      ///     #0 - The outgoing token chain
      ///     #1 - The first register result value (optional)
      ///     #2 - The second register result value (optional)
      ///
      /// The CALL vs TAILCALL distinction boils down to whether the callee is
      /// known not to modify the caller's stack frame, as is standard with
      /// LLVM.
      CALL,
      TAILCALL,
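
      // A hedged sketch of how lowering might assemble these operands
      // (illustrative only; NodeTys, NumBytes, and BytesPopped are
      // hypothetical locals, not names from this file):
      //   SDValue Ops[] = { Chain, Callee,
      //                     DAG.getConstant(NumBytes, MVT::i32),       // #2
      //                     DAG.getConstant(BytesPopped, MVT::i32) };  // #3
      //   Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
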
      /// RDTSC_DAG - This operation implements the lowering for
      /// readcyclecounter.
      RDTSC_DAG,

      /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,

      /// X86 SetCC. Operand 1 is condition code, and operand 2 is the flag
      /// operand produced by a CMP instruction.
      SETCC,

      /// X86 conditional moves. Operand 1 and operand 2 are the two values
      /// to select from (operand 1 is a R/W operand). Operand 3 is the
      /// condition code, and operand 4 is the flag operand produced by a CMP
      /// or TEST instruction. It also writes a flag result.
      CMOV,

      /// X86 conditional branches. Operand 1 is the chain operand, operand 2
      /// is the block to branch to if the condition is true, operand 3 is the
      /// condition code, and operand 4 is the flag operand produced by a CMP
      /// or TEST instruction.
      BRCOND,

      /// Return with a flag operand. Operand 1 is the chain operand, operand
      /// 2 is the number of bytes of stack to pop.
      RET_FLAG,
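
      // For example, an x86 stdcall callee with 12 bytes of arguments pops
      // them itself, so operand 2 would be 12; under the C calling convention
      // the caller cleans the stack and operand 2 is 0.
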
      /// REP_STOS - Repeat fill, corresponds to X86::REP_STOSx.
      REP_STOS,

      /// REP_MOVS - Repeat move, corresponds to X86::REP_MOVSx.
      REP_MOVS,

      /// GlobalBaseReg - On Darwin, this node represents the result of the popl
      /// at function entry, used for PIC code.
      GlobalBaseReg,

      /// Wrapper - A wrapper node for TargetConstantPool,
      /// TargetExternalSymbol, and TargetGlobalAddress.
      Wrapper,

      /// WrapperRIP - Special wrapper used under X86-64 PIC mode for RIP
      /// relative displacements.
      WrapperRIP,

      /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRB.
      PEXTRB,

      /// PEXTRW - Extract a 16-bit value from a vector and zero extend it to
      /// i32, corresponds to X86::PEXTRW.
      PEXTRW,

      /// INSERTPS - Insert any element of a 4 x float vector into any element
      /// of a destination 4 x float vector.
      INSERTPS,

      /// PINSRB - Insert the lower 8 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRB.
      PINSRB,

      /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
      /// corresponds to X86::PINSRW.
      PINSRW,

      /// FMAX, FMIN - Floating point max and min.
      FMAX, FMIN,
      /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal
      /// approximation. Note that these typically require refinement
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,
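
      // As an illustrative note (not from the original header): one
      // Newton-Raphson step refines an FRCP estimate x0 of 1/d to roughly
      // twice the precision:  x1 = x0 * (2 - d * x0).
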
      // TLSADDR, THREAD_POINTER - Thread Local Storage.
      TLSADDR, THREAD_POINTER,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      /// TC_RETURN - Tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      // LCMPXCHG_DAG, LCMPXCHG8_DAG - Compare and swap.
      LCMPXCHG_DAG,
      LCMPXCHG8_DAG,

      // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
      // ATOMXOR64_DAG, ATOMNAND64_DAG - Atomic 64-bit binary operations.
      ATOMADD64_DAG,
      ATOMSUB64_DAG,
      ATOMOR64_DAG,
      ATOMAND64_DAG,
      ATOMXOR64_DAG,
      ATOMNAND64_DAG,

      // FNSTCW16m - Store FP control word into i16 memory.
      FNSTCW16m,

      // VZEXT_MOVL - Vector move low and zero extend.
      VZEXT_MOVL,

      // VZEXT_LOAD - Load, scalar_to_vector, and zero extend.
      VZEXT_LOAD,

      // VSHL, VSRL - Vector logical left / right shift.
      VSHL, VSRL,

      // CMPPD, CMPPS - Vector double/float comparison.
      CMPPD, CMPPS,

      // PCMP* - Vector integer comparisons.
      PCMPEQB, PCMPEQW, PCMPEQD, PCMPEQQ,
      PCMPGTB, PCMPGTW, PCMPGTD, PCMPGTQ
    };
  }
  /// Define some predicates that are used for node matching.
  namespace X86 {
    /// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFD.
    bool isPSHUFDMask(SDNode *N);
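
    // Illustrative example (not from the original header): for a v4i32
    // shuffle, the mask <2, 3, 0, 1> is PSHUFD-able; getShuffleSHUFImmediate
    // below would encode it as the immediate 0x4E.
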
    /// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFHW.
    bool isPSHUFHWMask(SDNode *N);

    /// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to PSHUFLW.
    bool isPSHUFLWMask(SDNode *N);
    /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to SHUFP*.
    bool isSHUFPMask(SDNode *N);

    /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
    bool isMOVHLPSMask(SDNode *N);
    /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
    /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
    /// <2, 3, 2, 3>.
    bool isMOVHLPS_v_undef_Mask(SDNode *N);
    /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
    bool isMOVLPMask(SDNode *N);

    /// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
    /// as well as MOVLHPS.
    bool isMOVHPMask(SDNode *N);

    /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKL.
    bool isUNPCKLMask(SDNode *N, bool V2IsSplat = false);

    /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to UNPCKH.
    bool isUNPCKHMask(SDNode *N, bool V2IsSplat = false);
    /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
    /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
    /// <0, 4, 1, 5>.
    bool isUNPCKL_v_undef_Mask(SDNode *N);

    /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
    /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
    /// <2, 6, 3, 7>.
    bool isUNPCKH_v_undef_Mask(SDNode *N);
    /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSS,
    /// MOVSD, and MOVD, i.e. setting the lowest element.
    bool isMOVLMask(SDNode *N);
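
    // Illustrative example (not from the original header): for a 4-element
    // shuffle, the mask <4, 1, 2, 3> takes element 0 from the second vector
    // and the rest from the first, which is exactly what MOVSS performs.
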
    /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
    bool isMOVSHDUPMask(SDNode *N);

    /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
    bool isMOVSLDUPMask(SDNode *N);

    /// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of a single element.
    bool isSplatMask(SDNode *N);
    /// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a splat of element zero.
    bool isSplatLoMask(SDNode *N);
    /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for input to MOVDDUP.
    bool isMOVDDUPMask(SDNode *N);
    /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUF* and
    /// SHUFP* instructions.
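    /// For example, the mask <3, 1, 2, 0> encodes as the immediate 0x27:
    /// bits [1:0] = 3 select the source of result element 0, bits [3:2] = 1
    /// that of element 1, and so on.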
    unsigned getShuffleSHUFImmediate(SDNode *N);
    /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFHW
    /// instruction.
    unsigned getShufflePSHUFHWImmediate(SDNode *N);

    /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
    /// the specified isShuffleMask VECTOR_SHUFFLE mask with the PSHUFLW
    /// instruction.
    unsigned getShufflePSHUFLWImmediate(SDNode *N);
  }
  //===--------------------------------------------------------------------===//
  //  X86TargetLowering - X86 Implementation of the TargetLowering interface
  class X86TargetLowering : public TargetLowering {
    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
    int RegSaveFrameIndex;            // X86-64 vararg func register save area.
    unsigned VarArgsGPOffset;         // X86-64 vararg func int reg offset.
    unsigned VarArgsFPOffset;         // X86-64 vararg func fp reg offset.
    int BytesToPopOnReturn;           // Number of arg bytes ret should pop.
    int BytesCallerReserves;          // Number of arg bytes the caller reserves.

  public:
    explicit X86TargetLowering(X86TargetMachine &TM);
    /// getPICJumpTableRelocBase - Returns the relocation base for the given
    /// PIC jumptable.
    SDValue getPICJumpTableRelocBase(SDValue Table,
                                     SelectionDAG &DAG) const;
    // Return the number of bytes that a function should pop when it returns (in
    // addition to the space used by the return address).
    unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }

    // Return the number of bytes that the caller reserves for arguments passed
    // to this function.
    unsigned getBytesCallerReserves() const { return BytesCallerReserves; }
    /// getStackPtrReg - Return the stack pointer register we are using: either
    /// ESP or RSP.
    unsigned getStackPtrReg() const { return X86StackPtr; }
    /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
    /// function arguments in the caller parameter area. For X86, aggregates
    /// that contain SSE vectors are placed at 16-byte boundaries while the rest
    /// are at 4-byte boundaries.
    virtual unsigned getByValTypeAlignment(const Type *Ty) const;
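
    // Illustrative example (not from the original header): a by-value struct
    // containing a v4f32 member would be placed at a 16-byte boundary, while
    // a struct of two i32 fields would be placed at a 4-byte boundary.
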
    /// getOptimalMemOpType - Returns the target specific optimal type for load
    /// and store operations as a result of memset, memcpy, and memmove
    /// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
    /// determining it.
    virtual MVT getOptimalMemOpType(uint64_t Size, unsigned Align,
                                    bool isSrcConst, bool isSrcStr) const;
    /// LowerOperation - Provide custom lowering hooks for some operations.
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

    /// ReplaceNodeResults - Replace a node with an illegal result type
    /// with a new node built out of custom code.
    virtual SDNode *ReplaceNodeResults(SDNode *N, SelectionDAG &DAG);

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                           MachineBasicBlock *MBB);
    /// getTargetNodeName - This method returns the name of a target specific
    /// DAG node.
    virtual const char *getTargetNodeName(unsigned Opcode) const;
    /// getSetCCResultType - Return the ISD::SETCC ValueType.
    virtual MVT getSetCCResultType(const SDValue &) const;
    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one and return them in the
    /// KnownZero/KnownOne bitsets.
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth = 0) const;

    virtual bool
    isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);

    ConstraintType getConstraintType(const std::string &Constraint) const;

    std::vector<unsigned>
    getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                      MVT VT) const;

    virtual const char *LowerXConstraint(MVT ConstraintVT) const;
    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              bool hasMemory,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;
    /// getRegForInlineAsmConstraint - Given a physical register constraint
    /// (e.g. {edx}), return the register number and the register class for the
    /// register. This should only be used for C_Register constraints. On
    /// error, this returns a register number of 0.
    std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT VT) const;
    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
    /// register EAX to i16 by referencing its sub-register AX.
    virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const;
    virtual bool isTruncateFree(MVT VT1, MVT VT2) const;
    /// isShuffleMaskLegal - Targets can use this to indicate that they only
    /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
    /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
    /// values are assumed to be legal.
    virtual bool isShuffleMaskLegal(SDValue Mask, MVT VT) const;
    /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
    /// this to indicate whether there is a suitable VECTOR_SHUFFLE that can be
    /// used to replace a VAND with a constant pool entry.
    virtual bool isVectorClearMaskLegal(const std::vector<SDValue> &BVOps,
                                        MVT EVT, SelectionDAG &DAG) const;
    /// ShouldShrinkFPConstant - If true, then instruction selection should
    /// seek to shrink the FP constant of the specified type to a smaller type
    /// in order to save space and / or reduce runtime.
    virtual bool ShouldShrinkFPConstant(MVT VT) const {
      // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
      // expensive than a straight movsd. On the other hand, it's important to
      // shrink long double fp constants since fldt is very slow.
      return !X86ScalarSSEf64 || VT == MVT::f80;
    }
    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets that want to do tail call
    /// optimization should implement this function.
    virtual bool IsEligibleForTailCallOptimization(CallSDNode *TheCall,
                                                   SDValue Ret,
                                                   SelectionDAG &DAG) const;

    virtual const X86Subtarget* getSubtarget() {
      return Subtarget;
    }
    /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
    /// computed in an SSE register, not on the X87 floating point stack.
    bool isScalarFPTypeInSSEReg(MVT VT) const {
      return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is legal with SSE2
             (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is legal with SSE1
    }
    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    virtual FastISel *
    createFastISel(MachineFunction &mf,
                   MachineModuleInfo *mmi,
                   DenseMap<const Value *, unsigned> &,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &,
                   DenseMap<const AllocaInst *, int> &);

  private:
    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;
    const X86RegisterInfo *RegInfo;
    const TargetData *TD;

    /// X86StackPtr - X86 physical register used as stack ptr.
    unsigned X86StackPtr;
    /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
    /// floating point ops.
    /// When SSE is available, use it for f32 operations.
    /// When SSE2 is available, use it for f64 operations.
    bool X86ScalarSSEf32;
    bool X86ScalarSSEf64;
    SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
                            unsigned CallingConv, SelectionDAG &DAG);

    SDValue LowerMemArgument(SDValue Op, SelectionDAG &DAG,
                             const CCValAssign &VA, MachineFrameInfo *MFI,
                             unsigned CC, SDValue Root, unsigned i);

    SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
                             const SDValue &StackPtr,
                             const CCValAssign &VA, SDValue Chain,
                             SDValue Arg, ISD::ArgFlagsTy Flags);
    // Call lowering helpers.
    bool IsCalleePop(bool isVarArg, unsigned CallingConv);
    bool CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall);
    bool CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall);
    SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                    SDValue Chain, bool IsTailCall, bool Is64Bit,
                                    int FPDiff);
    CCAssignFn *CCAssignFnForNode(unsigned CallingConv) const;
    NameDecorationStyle NameDecorationForFORMAL_ARGUMENTS(SDValue Op);
    unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);

    std::pair<SDValue,SDValue> FP_TO_SINTHelper(SDValue Op,
                                                SelectionDAG &DAG);

    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG);
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG);
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG);
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddress(const GlobalValue *GV, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG);
    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG);
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
    SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
    SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG);
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG);
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG);
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG);
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG);
    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG);
    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG);
    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG);
    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG);
    SDValue LowerATOMIC_BINARY_64(SDValue Op, SelectionDAG &DAG,
                                  unsigned NewOp);
    SDNode *ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG);
    SDNode *ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG);
    SDNode *ExpandATOMIC_CMP_SWAP(SDNode *N, SelectionDAG &DAG);

    SDValue EmitTargetCodeForMemset(SelectionDAG &DAG,
                                    SDValue Chain,
                                    SDValue Dst, SDValue Src,
                                    SDValue Size, unsigned Align,
                                    const Value *DstSV, uint64_t DstSVOff);
    SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG,
                                    SDValue Chain,
                                    SDValue Dst, SDValue Src,
                                    SDValue Size, unsigned Align,
                                    bool AlwaysInline,
                                    const Value *DstSV, uint64_t DstSVOff,
                                    const Value *SrcSV, uint64_t SrcSVOff);
    /// Utility function to emit atomic bitwise operations (and, or, xor).
    /// It takes the bitwise instruction to expand, the associated machine
    /// basic block, and the associated X86 opcodes for reg/reg and reg/imm.
    MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter(
                                                        MachineInstr *BInstr,
                                                        MachineBasicBlock *BB,
                                                        unsigned regOpc,
                                                        unsigned immOpc,
                                                        unsigned LoadOpc,
                                                        unsigned CXchgOpc,
                                                        unsigned copyOpc,
                                                        unsigned notOpc,
                                                        unsigned EAXreg,
                                                        TargetRegisterClass *RC,
                                                        bool invSrc = false);
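
    // Illustrative pairing (assuming the standard X86 opcode names): an
    // atomic 32-bit AND would pass regOpc = X86::AND32rr and
    // immOpc = X86::AND32ri.
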
    MachineBasicBlock *EmitAtomicBit6432WithCustomInserter(
                                                        MachineInstr *BInstr,
                                                        MachineBasicBlock *BB,
                                                        unsigned regOpcL,
                                                        unsigned regOpcH,
                                                        unsigned immOpcL,
                                                        unsigned immOpcH,
                                                        bool invSrc = false);

    /// Utility function to emit atomic min and max. It takes the min/max
    /// instruction to expand, the associated basic block, and the associated
    /// cmov opcode for moving the min or max value.
    MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr,
                                                          MachineBasicBlock *BB,
                                                          unsigned cmovOpc);
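
    // Illustrative example (assuming the standard X86 opcode names): atomic
    // signed min on i32 could pass cmovOpc = X86::CMOVL32rr to keep the
    // smaller of the two values after the compare.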
  };

  namespace X86 {
    FastISel *createFastISel(MachineFunction &mf,
                             MachineModuleInfo *mmi,
                             DenseMap<const Value *, unsigned> &,
                             DenseMap<const BasicBlock *, MachineBasicBlock *> &,
                             DenseMap<const AllocaInst *, int> &);
  }
} // end namespace llvm

#endif    // X86ISELLOWERING_H