diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 7b59b81f81a..1809de5344e 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -19,6 +19,7 @@
 #include "X86RegisterInfo.h"
 #include "X86MachineFunctionInfo.h"
 #include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetOptions.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/CodeGen/CallingConvLower.h"
@@ -118,6 +119,10 @@
       /// operand produced by a CMP instruction.
       SETCC,
 
+      // Same as SETCC except it's materialized with an SBB and the value is
+      // all ones or all zeros.
+      SETCC_CARRY,
+
       /// X86 conditional moves. Operand 0 and operand 1 are the two values
       /// to select from. Operand 2 is the condition code, and operand 3 is the
       /// flag operand produced by a CMP or TEST instruction. It also writes a
@@ -152,6 +157,11 @@
       /// relative displacements.
       WrapperRIP,
 
+      /// MOVQ2DQ - Copies a 64-bit value from a vector to another vector.
+      /// Can be used to move a vector value from an MMX register to an XMM
+      /// register.
+      MOVQ2DQ,
+
       /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to
       /// i32, corresponds to X86::PEXTRB.
       PEXTRB,
@@ -170,7 +180,7 @@
 
       /// PINSRW - Insert the lower 16 bits of a 32-bit value into a vector,
       /// corresponds to X86::PINSRW.
-      PINSRW,
+      PINSRW, MMX_PINSRW,
 
       /// PSHUFB - Shuffle 16 8-bit values within a vector.
       PSHUFB,
@@ -186,6 +196,10 @@
 
       // TLSADDR - Thread Local Storage.
       TLSADDR,
+
+      // TLSCALL - Thread Local Storage. A call to an OS-provided thunk at
+      // the address from an earlier relocation.
+      TLSCALL,
 
       // SegmentBaseAddress - The address segment:0
       SegmentBaseAddress,
@@ -239,6 +253,9 @@
       // with control flow.
       VASTART_SAVE_XMM_REGS,
 
+      // MINGW_ALLOCA - MinGW's __alloca call to do stack probing.
+      MINGW_ALLOCA,
+
       // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
       // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
       // Atomic 64-bit binary operations.
@@ -248,7 +265,17 @@
       ATOMXOR64_DAG,
       ATOMAND64_DAG,
       ATOMNAND64_DAG,
-      ATOMSWAP64_DAG
+      ATOMSWAP64_DAG,
+
+      // Memory barriers.
+      MEMBARRIER,
+      MFENCE,
+      SFENCE,
+      LFENCE
+
+      // WARNING: Do not add anything at the end unless you want the node to
+      // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be
+      // treated as target memory ops!
     };
   }
 
@@ -286,7 +313,7 @@
     /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
    /// specifies a shuffle of elements that is suitable for MOVHP{S|D},
    /// as well as MOVLHPS.
-    bool isMOVHPMask(ShuffleVectorSDNode *N);
+    bool isMOVLHPSMask(ShuffleVectorSDNode *N);
 
     /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
     /// specifies a shuffle of elements that is suitable for input to UNPCKL.
@@ -357,30 +384,27 @@
   //===--------------------------------------------------------------------===//
   //  X86TargetLowering - X86 Implementation of the TargetLowering interface
   class X86TargetLowering : public TargetLowering {
-    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
-    int RegSaveFrameIndex;            // X86-64 vararg func register save area.
-    unsigned VarArgsGPOffset;         // X86-64 vararg func int reg offset.
-    unsigned VarArgsFPOffset;         // X86-64 vararg func fp reg offset.
-    int BytesToPopOnReturn;           // Number of arg bytes ret should pop.
-    int BytesCallerReserves;          // Number of arg bytes the caller reserves.
-
   public:
     explicit X86TargetLowering(X86TargetMachine &TM);
 
+    /// getPICBaseSymbol - Return the X86-32 PIC base.
+    MCSymbol *getPICBaseSymbol(const MachineFunction *MF, MCContext &Ctx) const;
+
+    virtual unsigned getJumpTableEncoding() const;
+
+    virtual const MCExpr *
+    LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
+                              const MachineBasicBlock *MBB, unsigned uid,
+                              MCContext &Ctx) const;
+
     /// getPICJumpTableRelocBase - Returns the relocation base for the given
     /// PIC jumptable.
-    SDValue getPICJumpTableRelocBase(SDValue Table,
-                                     SelectionDAG &DAG) const;
-
-    // Return the number of bytes that a function should pop when it returns (in
-    // addition to the space used by the return address).
-    //
-    unsigned getBytesToPopOnReturn() const { return BytesToPopOnReturn; }
-
-    // Return the number of bytes that the caller reserves for arguments passed
-    // to this function.
-    unsigned getBytesCallerReserves() const { return BytesCallerReserves; }
-
+    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
+                                             SelectionDAG &DAG) const;
+    virtual const MCExpr *
+    getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
+                                 unsigned JTI, MCContext &Ctx) const;
+
     /// getStackPtrReg - Return the stack pointer register we are using: either
     /// ESP or RSP.
     unsigned getStackPtrReg() const { return X86StackPtr; }
@@ -393,11 +417,20 @@
 
     /// getOptimalMemOpType - Returns the target specific optimal type for load
     /// and store operations as a result of memset, memcpy, and memmove
-    /// lowering. It returns EVT::iAny if SelectionDAG should be responsible for
-    /// determining it.
-    virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
-                                    bool isSrcConst, bool isSrcStr,
-                                    SelectionDAG &DAG) const;
+    /// lowering. If DstAlign is zero, the destination alignment can satisfy
+    /// any constraint. Similarly, if SrcAlign is zero there is no need to
+    /// check it against an alignment requirement, probably because the
+    /// source does not need to be loaded. If 'NonScalarIntSafe' is true, it
+    /// is safe to return a non-scalar-integer type, e.g. for an empty string
+    /// source, a constant, or a value loaded from memory. 'MemcpyStrSrc'
+    /// indicates whether the memcpy source is constant, so it does not need
+    /// to be loaded.
+    /// It returns EVT::Other if the type should be determined using generic
+    /// target-independent logic.
+    virtual EVT
+    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+                        bool NonScalarIntSafe, bool MemcpyStrSrc,
+                        MachineFunction &MF) const;
 
     /// allowsUnalignedMemoryAccesses - Returns true if the target allows
     /// unaligned memory accesses of the specified type.
@@ -407,20 +440,32 @@
 
     /// LowerOperation - Provide custom lowering hooks for some operations.
     ///
-    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
+    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
 
     /// ReplaceNodeResults - Replace the results of a node with an illegal
     /// result type with new values built out of custom code.
     ///
     virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
-                                    SelectionDAG &DAG);
+                                    SelectionDAG &DAG) const;
 
     virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
 
-    virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
-                                                           MachineBasicBlock *MBB,
-                    DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;
+    /// isTypeDesirableForOp - Return true if the target has native support for
+    /// the specified value type and it is 'desirable' to use the type for the
+    /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
+    /// instruction encodings are longer and some i16 instructions are slow.
+    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;
+
+    /// IsDesirableToPromoteOp - Return true if the target has native support
+    /// for the specified value type and it is 'desirable' to use the type.
+    /// e.g. On x86 i16 is legal, but undesirable since i16 instruction
+    /// encodings are longer and some i16 instructions are slow.
+    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;
+
+    virtual MachineBasicBlock *
+      EmitInstrWithCustomInserter(MachineInstr *MI,
+                                  MachineBasicBlock *MBB) const;
 
 
     /// getTargetNodeName - This method returns the name of a target specific
@@ -441,9 +486,9 @@
                                                 unsigned Depth = 0) const;
 
     virtual bool
-    isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) const;
+    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
 
-    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG);
+    SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
 
     virtual bool ExpandInlineAsm(CallInst *CI) const;
 
@@ -461,7 +506,6 @@
     /// being processed is 'm'.
     virtual void LowerAsmOperandForConstraint(SDValue Op,
                                               char ConstraintLetter,
-                                              bool hasMemory,
                                               std::vector<SDValue> &Ops,
                                               SelectionDAG &DAG) const;
 
@@ -528,17 +572,7 @@
       return !X86ScalarSSEf64 || VT == MVT::f80;
     }
 
-    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
-    /// for tail call optimization. Targets which want to do tail call
-    /// optimization should implement this function.
-    virtual bool
-    IsEligibleForTailCallOptimization(SDValue Callee,
-                                      CallingConv::ID CalleeCC,
-                                      bool isVarArg,
-                                      const SmallVectorImpl<ISD::InputArg> &Ins,
-                                      SelectionDAG& DAG) const;
-
-    virtual const X86Subtarget* getSubtarget() {
+    const X86Subtarget* getSubtarget() const {
       return Subtarget;
     }
 
@@ -549,29 +583,19 @@
              (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
     }
 
-    /// getWidenVectorType: given a vector type, returns the type to widen
-    /// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
-    /// If there is no vector type that we want to widen to, returns EVT::Other.
-    /// When and where to widen is target dependent, based on the cost of
-    /// scalarizing vs using the wider vector type.
-    virtual EVT getWidenVectorType(EVT VT) const;
-
     /// createFastISel - This method returns a target specific FastISel object,
     /// or null if the target does not support "fast" ISel.
-    virtual FastISel *
-    createFastISel(MachineFunction &mf,
-                   MachineModuleInfo *mmi, DwarfWriter *dw,
-                   DenseMap<const Value *, unsigned> &,
-                   DenseMap<const BasicBlock *, MachineBasicBlock *> &,
-                   DenseMap<const AllocaInst *, int> &
-#ifndef NDEBUG
-                   , SmallSet<Instruction*, 8> &
-#endif
-                   );
+    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo) const;
 
     /// getFunctionAlignment - Return the Log2 alignment of this function.
     virtual unsigned getFunctionAlignment(const Function *F) const;
 
+    /// getStackCookieLocation - Return true if the target stores stack
+    /// protector cookies at a fixed offset in some non-standard address
+    /// space, and populates the address space and offset as
+    /// appropriate.
+    virtual bool getStackCookieLocation(unsigned &AddressSpace,
+                                        unsigned &Offset) const;
+
   private:
     /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
     /// make the right decision when generating code for different targets.
@@ -602,126 +626,140 @@
                             CallingConv::ID CallConv, bool isVarArg,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             DebugLoc dl, SelectionDAG &DAG,
-                            SmallVectorImpl<SDValue> &InVals);
+                            SmallVectorImpl<SDValue> &InVals) const;
     SDValue LowerMemArgument(SDValue Chain,
                              CallingConv::ID CallConv,
                              const SmallVectorImpl<ISD::InputArg> &ArgInfo,
                              DebugLoc dl, SelectionDAG &DAG,
                              const CCValAssign &VA, MachineFrameInfo *MFI,
-                             unsigned i);
+                             unsigned i) const;
     SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                              DebugLoc dl, SelectionDAG &DAG,
                              const CCValAssign &VA,
-                             ISD::ArgFlagsTy Flags);
+                             ISD::ArgFlagsTy Flags) const;
 
     // Call lowering helpers.
-    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv);
+
+    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
+    /// for tail call optimization. Targets which want to do tail call
+    /// optimization should implement this function.
+    bool IsEligibleForTailCallOptimization(SDValue Callee,
+                                           CallingConv::ID CalleeCC,
+                                           bool isVarArg,
+                                           bool isCalleeStructRet,
+                                           bool isCallerStructRet,
+                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
+                                    const SmallVectorImpl<SDValue> &OutVals,
+                                    const SmallVectorImpl<ISD::InputArg> &Ins,
+                                           SelectionDAG& DAG) const;
+    bool IsCalleePop(bool isVarArg, CallingConv::ID CallConv) const;
     SDValue EmitTailCallLoadRetAddr(SelectionDAG &DAG, SDValue &OutRetAddr,
                                 SDValue Chain, bool IsTailCall, bool Is64Bit,
-                                int FPDiff, DebugLoc dl);
+                                int FPDiff, DebugLoc dl) const;
 
     CCAssignFn *CCAssignFnForNode(CallingConv::ID CallConv) const;
-    NameDecorationStyle NameDecorationForCallConv(CallingConv::ID CallConv);
-    unsigned GetAlignedArgumentStackSize(unsigned StackSize, SelectionDAG &DAG);
+    unsigned GetAlignedArgumentStackSize(unsigned StackSize,
+                                         SelectionDAG &DAG) const;
 
     std::pair<SDValue,SDValue> FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
-                                               bool isSigned);
-
-    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG);
+                                               bool isSigned) const;
+
+    SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
+                                   SelectionDAG &DAG) const;
+    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
                                int64_t Offset, SelectionDAG &DAG) const;
-    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
+    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
     SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
-                      SelectionDAG &DAG);
-    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG);
-
-    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG);
-    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG);
+                      SelectionDAG &DAG) const;
+    SDValue LowerBIT_CONVERT(SDValue op, SelectionDAG &DAG) const;
+    SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerToBT(SDValue And, ISD::CondCode CC,
+                      DebugLoc dl, SelectionDAG &DAG) const;
+    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerMEMSET(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
+
+    SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const;
 
     virtual SDValue
       LowerFormalArguments(SDValue Chain,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
-                           SmallVectorImpl<SDValue> &InVals);
+                           SmallVectorImpl<SDValue> &InVals) const;
     virtual SDValue
       LowerCall(SDValue Chain, SDValue Callee,
-                CallingConv::ID CallConv, bool isVarArg, bool isTailCall,
+                CallingConv::ID CallConv, bool isVarArg, bool &isTailCall,
                 const SmallVectorImpl<ISD::OutputArg> &Outs,
+                const SmallVectorImpl<SDValue> &OutVals,
                 const SmallVectorImpl<ISD::InputArg> &Ins,
                 DebugLoc dl, SelectionDAG &DAG,
-                SmallVectorImpl<SDValue> &InVals);
+                SmallVectorImpl<SDValue> &InVals) const;
 
     virtual SDValue
       LowerReturn(SDValue Chain,
                   CallingConv::ID CallConv, bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
-                  DebugLoc dl, SelectionDAG &DAG);
+                  const SmallVectorImpl<SDValue> &OutVals,
+                  DebugLoc dl, SelectionDAG &DAG) const;
+
+    virtual bool
+      CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
+                     const SmallVectorImpl<ISD::OutputArg> &Outs,
+                     LLVMContext &Context) const;
 
     void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
-                                 SelectionDAG &DAG, unsigned NewOp);
-
-    SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
-                                    SDValue Chain,
-                                    SDValue Dst, SDValue Src,
-                                    SDValue Size, unsigned Align,
-                                    const Value *DstSV, uint64_t DstSVOff);
-    SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
-                                    SDValue Chain,
-                                    SDValue Dst, SDValue Src,
-                                    SDValue Size, unsigned Align,
-                                    bool AlwaysInline,
-                                    const Value *DstSV, uint64_t DstSVOff,
-                                    const Value *SrcSV, uint64_t SrcSVOff);
-
+                                 SelectionDAG &DAG, unsigned NewOp) const;
+
     /// Utility function to emit string processing SSE4.2 instructions
     /// that return in xmm0.
     /// This takes the instruction to expand, the associated machine basic
     /// block, the number of args, and whether or not the second arg is
     /// in memory or not.
     MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB,
-                               unsigned argNum, bool inMem) const;
+                                unsigned argNum, bool inMem) const;
 
     /// Utility function to emit atomic bitwise operations (and, or, xor).
     /// It takes the bitwise instruction to expand, the associated machine basic
@@ -733,7 +771,6 @@
                                                     unsigned immOpc,
                                                     unsigned loadOpc,
                                                     unsigned cxchgOpc,
-                                                    unsigned copyOpc,
                                                     unsigned notOpc,
                                                     unsigned EAXreg,
                                                     TargetRegisterClass *RC,
@@ -761,29 +798,26 @@
                                                       MachineBasicBlock *BB) const;
 
     MachineBasicBlock *EmitLoweredSelect(MachineInstr *I,
-                                         MachineBasicBlock *BB,
-                   DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const;
+                                         MachineBasicBlock *BB) const;
+
+    MachineBasicBlock *EmitLoweredMingwAlloca(MachineInstr *MI,
+                                              MachineBasicBlock *BB) const;
+
+    MachineBasicBlock *EmitLoweredTLSCall(MachineInstr *MI,
+                                          MachineBasicBlock *BB) const;
+
     /// Emit nodes that will be selected as "test Op0,Op0", or something
     /// equivalent, for use with the given x86 condition code.
-    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG);
+    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
 
     /// Emit nodes that will be selected as "cmp Op0,Op1", or something
     /// equivalent, for use with the given x86 condition code.
     SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
-                    SelectionDAG &DAG);
+                    SelectionDAG &DAG) const;
   };
 
   namespace X86 {
-    FastISel *createFastISel(MachineFunction &mf,
-                             MachineModuleInfo *mmi, DwarfWriter *dw,
-                             DenseMap<const Value *, unsigned> &,
-                             DenseMap<const BasicBlock *, MachineBasicBlock *> &,
-                             DenseMap<const AllocaInst *, int> &
-#ifndef NDEBUG
-                             , SmallSet<Instruction*, 8> &
-#endif
-                             );
+    FastISel *createFastISel(FunctionLoweringInfo &funcInfo);
  }
}
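
Two of the additions above encode x86-specific reasoning worth spelling out.
First, SETCC_CARRY models the classic SBB idiom: once a compare has set the
carry flag, "sbb reg, reg" computes reg - reg - CF, which is all ones when the
carry is set and all zeros otherwise. A minimal C++ sketch of the kind of code
this lowers well (the function names are hypothetical, and the assembly in the
comments is the expected idiom, shown for illustration only):

    #include <cstdint>

    // An x86 compiler can lower this without a branch:
    //   cmp  edi, esi   ; sets CF when x < y (unsigned)
    //   sbb  eax, eax   ; eax = eax - eax - CF = 0 or 0xFFFFFFFF
    uint32_t MaskIfBelow(uint32_t x, uint32_t y) {
      return x < y ? UINT32_MAX : 0u;
    }

    // The all-ones/all-zeros mask then feeds branchless selects:
    uint32_t Min(uint32_t x, uint32_t y) {
      uint32_t m = MaskIfBelow(x, y);  // all ones when x < y
      return (x & m) | (y & ~m);       // pick x or y without a branch
    }

Second, isTypeDesirableForOp and IsDesirableToPromoteOp exist because i16 is
legal on x86 yet usually a poor choice: 16-bit instructions carry the 0x66
operand-size prefix (for example, "add ax, si" encodes as 66 01 F0, one byte
longer than the 32-bit "add eax, esi" at 01 F0), and some i16 operations are
slower, so these hooks let the DAG combiner promote i16 operations to i32 when
doing so cannot change the result.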