diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 0d7037258b1..2cda8211ba9 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -37,6 +37,8 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSymbol.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Target/TargetOptions.h"
 using namespace llvm;
@@ -58,13 +60,13 @@ class X86FastISel final : public FastISel {
 public:
   explicit X86FastISel(FunctionLoweringInfo &funcInfo,
                        const TargetLibraryInfo *libInfo)
-    : FastISel(funcInfo, libInfo) {
-    Subtarget = &TM.getSubtarget<X86Subtarget>();
+      : FastISel(funcInfo, libInfo) {
+    Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
     X86ScalarSSEf64 = Subtarget->hasSSE2();
     X86ScalarSSEf32 = Subtarget->hasSSE1();
   }
 
-  bool TargetSelectInstruction(const Instruction *I) override;
+  bool fastSelectInstruction(const Instruction *I) override;
 
   /// \brief The specified machine instr operand is a vreg, and that
   /// vreg is being provided by the specified load instruction. If possible,
@@ -73,20 +75,22 @@ public:
   bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                            const LoadInst *LI) override;
 
-  bool FastLowerArguments() override;
+  bool fastLowerArguments() override;
+  bool fastLowerCall(CallLoweringInfo &CLI) override;
+  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
 
 #include "X86GenFastISel.inc"
 
 private:
-  bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
+  bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT, DebugLoc DL);
 
-  bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, MachineMemOperand *MMO,
-                       unsigned &ResultReg);
+  bool X86FastEmitLoad(EVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
+                       unsigned &ResultReg, unsigned Alignment = 1);
 
-  bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,
+  bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
                         MachineMemOperand *MMO = nullptr, bool Aligned = false);
   bool X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
-                        const X86AddressMode &AM,
+                        X86AddressMode &AM,
                         MachineMemOperand *MMO = nullptr, bool Aligned = false);
 
   bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
@@ -111,26 +115,25 @@ private:
   bool X86SelectDivRem(const Instruction *I);
 
-  bool X86FastEmitCMoveSelect(const Instruction *I);
+  bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);
 
-  bool X86FastEmitSSESelect(const Instruction *I);
+  bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);
 
-  bool X86FastEmitPseudoSelect(const Instruction *I);
+  bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);
 
   bool X86SelectSelect(const Instruction *I);
 
   bool X86SelectTrunc(const Instruction *I);
 
+  bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
+                               const TargetRegisterClass *RC);
+
   bool X86SelectFPExt(const Instruction *I);
   bool X86SelectFPTrunc(const Instruction *I);
-
-  bool X86VisitIntrinsicCall(const IntrinsicInst &I);
-  bool X86SelectCall(const Instruction *I);
-
-  bool DoSelectCall(const Instruction *I, const char *MemIntName);
+  bool X86SelectSIToFP(const Instruction *I);
 
   const X86InstrInfo *getInstrInfo() const
{ - return getTargetMachine()->getInstrInfo(); + return Subtarget->getInstrInfo(); } const X86TargetMachine *getTargetMachine() const { return static_cast(&TM); @@ -138,11 +141,14 @@ private: bool handleConstantAddresses(const Value *V, X86AddressMode &AM); - unsigned TargetMaterializeConstant(const Constant *C) override; + unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT); + unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT); + unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT); + unsigned fastMaterializeConstant(const Constant *C) override; - unsigned TargetMaterializeAlloca(const AllocaInst *C) override; + unsigned fastMaterializeAlloca(const AllocaInst *C) override; - unsigned TargetMaterializeFloatZero(const ConstantFP *CF) override; + unsigned fastMaterializeFloatZero(const ConstantFP *CF) override; /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is /// computed in an SSE register, not on the X87 floating point stack. @@ -158,54 +164,17 @@ private: bool TryEmitSmallMemcpy(X86AddressMode DestAM, X86AddressMode SrcAM, uint64_t Len); - std::pair - foldX86XALUIntrinsic(const Instruction *I, const Value *Cond); + bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I, + const Value *Cond); + + const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB, + X86AddressMode &AM); }; } // end anonymous namespace. -static CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) { - // If both operands are the same, then try to optimize or fold the cmp. - CmpInst::Predicate Predicate = CI->getPredicate(); - if (CI->getOperand(0) != CI->getOperand(1)) - return Predicate; - - switch (Predicate) { - default: llvm_unreachable("Invalid predicate!"); - case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break; - case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break; - case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break; - case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break; - case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break; - case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break; - case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break; - case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break; - case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break; - case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break; - case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break; - case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break; - - case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break; - case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break; - case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break; - case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break; - case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break; - case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break; - } - - return Predicate; -} - static std::pair -getX86ConditonCode(CmpInst::Predicate Predicate) { 
+getX86ConditionCode(CmpInst::Predicate Predicate) { X86::CondCode CC = X86::COND_INVALID; bool NeedSwap = false; switch (Predicate) { @@ -243,7 +212,7 @@ getX86ConditonCode(CmpInst::Predicate Predicate) { } static std::pair -getX86SSECondtionCode(CmpInst::Predicate Predicate) { +getX86SSEConditionCode(CmpInst::Predicate Predicate) { unsigned CC; bool NeedSwap = false; @@ -277,16 +246,30 @@ getX86SSECondtionCode(CmpInst::Predicate Predicate) { return std::make_pair(CC, NeedSwap); } +/// \brief Adds a complex addressing mode to the given machine instr builder. +/// Note, this will constrain the index register. If its not possible to +/// constrain the given index register, then a new one will be created. The +/// IndexReg field of the addressing mode will be updated to match in this case. +const MachineInstrBuilder & +X86FastISel::addFullAddress(const MachineInstrBuilder &MIB, + X86AddressMode &AM) { + // First constrain the index register. It needs to be a GR64_NOSP. + AM.IndexReg = constrainOperandRegClass(MIB->getDesc(), AM.IndexReg, + MIB->getNumOperands() + + X86::AddrIndexReg); + return ::addFullAddress(MIB, AM); +} + /// \brief Check if it is possible to fold the condition from the XALU intrinsic -/// into the user. -std::pair -X86FastISel::foldX86XALUIntrinsic(const Instruction *I, const Value *Cond) { +/// into the user. The condition code will only be updated on success. +bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I, + const Value *Cond) { if (!isa(Cond)) - return std::make_pair(false, X86::COND_INVALID); + return false; const auto *EV = cast(Cond); if (!isa(EV->getAggregateOperand())) - return std::make_pair(false, X86::COND_INVALID); + return false; const auto *II = cast(EV->getAggregateOperand()); MVT RetVT; @@ -294,46 +277,47 @@ X86FastISel::foldX86XALUIntrinsic(const Instruction *I, const Value *Cond) { Type *RetTy = cast(Callee->getReturnType())->getTypeAtIndex(0U); if (!isTypeLegal(RetTy, RetVT)) - return std::make_pair(false, X86::COND_INVALID); + return false; if (RetVT != MVT::i32 && RetVT != MVT::i64) - return std::make_pair(false, X86::COND_INVALID); + return false; - X86::CondCode CC; + X86::CondCode TmpCC; switch (II->getIntrinsicID()) { - default: return std::make_pair(false, X86::COND_INVALID); + default: return false; case Intrinsic::sadd_with_overflow: case Intrinsic::ssub_with_overflow: case Intrinsic::smul_with_overflow: - case Intrinsic::umul_with_overflow: CC = X86::COND_O; break; + case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break; case Intrinsic::uadd_with_overflow: - case Intrinsic::usub_with_overflow: CC = X86::COND_B; break; + case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break; } // Check if both instructions are in the same basic block. if (II->getParent() != I->getParent()) - return std::make_pair(false, X86::COND_INVALID); + return false; // Make sure nothing is in the way - BasicBlock::const_iterator Start = I; - BasicBlock::const_iterator End = II; + BasicBlock::const_iterator Start(I); + BasicBlock::const_iterator End(II); for (auto Itr = std::prev(Start); Itr != End; --Itr) { // We only expect extractvalue instructions between the intrinsic and the // instruction to be selected. if (!isa(Itr)) - return std::make_pair(false, X86::COND_INVALID); + return false; // Check that the extractvalue operand comes from the intrinsic. 
const auto *EVI = cast(Itr); if (EVI->getAggregateOperand() != II) - return std::make_pair(false, X86::COND_INVALID); + return false; } - return std::make_pair(true, CC); + CC = TmpCC; + return true; } bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) { - EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true); + EVT evt = TLI.getValueType(DL, Ty, /*HandleUnknown=*/true); if (evt == MVT::Other || !evt.isSimple()) // Unhandled type. Halt "fast" selection and bail. return false; @@ -360,8 +344,9 @@ bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) { /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT. /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV. /// Return true and the result register by reference if it is possible. -bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM, - MachineMemOperand *MMO, unsigned &ResultReg) { +bool X86FastISel::X86FastEmitLoad(EVT VT, X86AddressMode &AM, + MachineMemOperand *MMO, unsigned &ResultReg, + unsigned Alignment) { // Get opcode and regclass of the output for the given load instruction. unsigned Opc = 0; const TargetRegisterClass *RC = nullptr; @@ -406,6 +391,30 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM, case MVT::f80: // No f80 support yet. return false; + case MVT::v4f32: + if (Alignment >= 16) + Opc = Subtarget->hasAVX() ? X86::VMOVAPSrm : X86::MOVAPSrm; + else + Opc = Subtarget->hasAVX() ? X86::VMOVUPSrm : X86::MOVUPSrm; + RC = &X86::VR128RegClass; + break; + case MVT::v2f64: + if (Alignment >= 16) + Opc = Subtarget->hasAVX() ? X86::VMOVAPDrm : X86::MOVAPDrm; + else + Opc = Subtarget->hasAVX() ? X86::VMOVUPDrm : X86::MOVUPDrm; + RC = &X86::VR128RegClass; + break; + case MVT::v4i32: + case MVT::v2i64: + case MVT::v8i16: + case MVT::v16i8: + if (Alignment >= 16) + Opc = Subtarget->hasAVX() ? X86::VMOVDQArm : X86::MOVDQArm; + else + Opc = Subtarget->hasAVX() ? X86::VMOVDQUrm : X86::MOVDQUrm; + RC = &X86::VR128RegClass; + break; } ResultReg = createResultReg(RC); @@ -422,8 +431,13 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM, /// and a displacement offset, or a GlobalAddress, /// i.e. V. Return true if it is possible. bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, - const X86AddressMode &AM, + X86AddressMode &AM, MachineMemOperand *MMO, bool Aligned) { + bool HasSSE2 = Subtarget->hasSSE2(); + bool HasSSE4A = Subtarget->hasSSE4A(); + bool HasAVX = Subtarget->hasAVX(); + bool IsNonTemporal = MMO && MMO->isNonTemporal(); + // Get opcode and regclass of the output for the given store instruction. unsigned Opc = 0; switch (VT.getSimpleVT().SimpleTy) { @@ -440,35 +454,59 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, // FALLTHROUGH, handling i1 as i8. case MVT::i8: Opc = X86::MOV8mr; break; case MVT::i16: Opc = X86::MOV16mr; break; - case MVT::i32: Opc = X86::MOV32mr; break; - case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode. + case MVT::i32: + Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr; + break; + case MVT::i64: + // Must be in x86-64 mode. + Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr; + break; case MVT::f32: - Opc = X86ScalarSSEf32 ? - (Subtarget->hasAVX() ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m; + if (X86ScalarSSEf32) { + if (IsNonTemporal && HasSSE4A) + Opc = X86::MOVNTSS; + else + Opc = HasAVX ? 
X86::VMOVSSmr : X86::MOVSSmr; + } else + Opc = X86::ST_Fp32m; break; case MVT::f64: - Opc = X86ScalarSSEf64 ? - (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m; + if (X86ScalarSSEf32) { + if (IsNonTemporal && HasSSE4A) + Opc = X86::MOVNTSD; + else + Opc = HasAVX ? X86::VMOVSDmr : X86::MOVSDmr; + } else + Opc = X86::ST_Fp64m; break; case MVT::v4f32: - if (Aligned) - Opc = Subtarget->hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; - else - Opc = Subtarget->hasAVX() ? X86::VMOVUPSmr : X86::MOVUPSmr; + if (Aligned) { + if (IsNonTemporal) + Opc = HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr; + else + Opc = HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr; + } else + Opc = HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr; break; case MVT::v2f64: - if (Aligned) - Opc = Subtarget->hasAVX() ? X86::VMOVAPDmr : X86::MOVAPDmr; - else - Opc = Subtarget->hasAVX() ? X86::VMOVUPDmr : X86::MOVUPDmr; + if (Aligned) { + if (IsNonTemporal) + Opc = HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr; + else + Opc = HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr; + } else + Opc = HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr; break; case MVT::v4i32: case MVT::v2i64: case MVT::v8i16: case MVT::v16i8: - if (Aligned) - Opc = Subtarget->hasAVX() ? X86::VMOVDQAmr : X86::MOVDQAmr; - else + if (Aligned) { + if (IsNonTemporal) + Opc = HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr; + else + Opc = HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr; + } else Opc = Subtarget->hasAVX() ? X86::VMOVDQUmr : X86::MOVDQUmr; break; } @@ -483,7 +521,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill, } bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, - const X86AddressMode &AM, + X86AddressMode &AM, MachineMemOperand *MMO, bool Aligned) { // Handle 'null' like i32/i64 0. if (isa(Val)) @@ -531,7 +569,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val, bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, unsigned &ResultReg) { - unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, + unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src, /*TODO: Kill=*/false); if (RR == 0) return false; @@ -583,7 +621,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) { // Ok, we need to do a load from a stub. If we've already loaded from // this stub, reuse the loaded pointer, otherwise emit the load now. - DenseMap::iterator I = LocalValueMap.find(V); + DenseMap::iterator I = LocalValueMap.find(V); unsigned LoadReg; if (I != LocalValueMap.end() && I->second != 0) { LoadReg = I->second; @@ -599,7 +637,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) { // Prepare for inserting code in the local-value area. SavePoint SaveInsertPt = enterLocalValueArea(); - if (TLI.getPointerTy() == MVT::i64) { + if (TLI.getPointerTy(DL) == MVT::i64) { Opc = X86::MOV64rm; RC = &X86::GR64RegClass; @@ -681,20 +719,21 @@ redo_gep: case Instruction::IntToPtr: // Look past no-op inttoptrs. - if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) + if (TLI.getValueType(DL, U->getOperand(0)->getType()) == + TLI.getPointerTy(DL)) return X86SelectAddress(U->getOperand(0), AM); break; case Instruction::PtrToInt: // Look past no-op ptrtoints. - if (TLI.getValueType(U->getType()) == TLI.getPointerTy()) + if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL)) return X86SelectAddress(U->getOperand(0), AM); break; case Instruction::Alloca: { // Do static allocas. 
const AllocaInst *A = cast(V); - DenseMap::iterator SI = + DenseMap::iterator SI = FuncInfo.StaticAllocaMap.find(A); if (SI != FuncInfo.StaticAllocaMap.end()) { AM.BaseType = X86AddressMode::FrameIndexBase; @@ -857,14 +896,14 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) { case Instruction::IntToPtr: // Look past no-op inttoptrs if its operand is in the same BB. if (InMBB && - TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy()) + TLI.getValueType(DL, U->getOperand(0)->getType()) == + TLI.getPointerTy(DL)) return X86SelectCallAddress(U->getOperand(0), AM); break; case Instruction::PtrToInt: // Look past no-op ptrtoints if its operand is in the same BB. - if (InMBB && - TLI.getValueType(U->getType()) == TLI.getPointerTy()) + if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL)) return X86SelectCallAddress(U->getOperand(0), AM); break; } @@ -880,7 +919,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) { (AM.Base.Reg != 0 || AM.IndexReg != 0)) return false; - // Can't handle DbgLocLImport. + // Can't handle DLL Import. if (GV->hasDLLImportStorageClass()) return false; @@ -942,7 +981,7 @@ bool X86FastISel::X86SelectStore(const Instruction *I) { unsigned Alignment = S->getAlignment(); unsigned ABIAlignment = DL.getABITypeAlignment(Val->getType()); - if (Alignment == 0) // Ensure that codegen never sees alignment 0 + if (Alignment == 0) // Ensure that codegen never sees alignment 0 Alignment = ABIAlignment; bool Aligned = Alignment >= ABIAlignment; @@ -991,12 +1030,11 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { if (Ret->getNumOperands() > 0) { SmallVector Outs; - GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI); + GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL); // Analyze operands of the call, assigning locations to each operand. SmallVector ValLocs; - CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, - I->getContext()); + CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext()); CCInfo.AnalyzeReturn(Outs, RetCC_X86); const Value *RV = Ret->getOperand(0); @@ -1019,11 +1057,11 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { // The calling-convention tables for x87 returns don't tell // the whole story. - if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1) + if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) return false; unsigned SrcReg = Reg + VA.getValNo(); - EVT SrcVT = TLI.getValueType(RV->getType()); + EVT SrcVT = TLI.getValueType(DL, RV->getType()); EVT DstVT = VA.getValVT(); // Special handling for extended integers. if (SrcVT != DstVT) { @@ -1038,23 +1076,23 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { if (SrcVT == MVT::i1) { if (Outs[0].Flags.isSExt()) return false; - SrcReg = FastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false); + SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false); SrcVT = MVT::i8; } unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND; - SrcReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, + SrcReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg, /*TODO: Kill=*/false); } // Make the copy. unsigned DstReg = VA.getLocReg(); - const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg); + const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg); // Avoid a cross-class copy. This is very unlikely. 
if (!SrcRC->contains(DstReg)) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), - DstReg).addReg(SrcReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg); // Add register to return instruction. RetRegs.push_back(VA.getLocReg()); @@ -1070,14 +1108,15 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { assert(Reg && "SRetReturnReg should have been set in LowerFormalArguments()!"); unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), - RetReg).addReg(Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), RetReg).addReg(Reg); RetRegs.push_back(RetReg); } // Now emit the RET. MachineInstrBuilder MIB = - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL)); for (unsigned i = 0, e = RetRegs.size(); i != e; ++i) MIB.addReg(RetRegs[i], RegState::Implicit); return true; @@ -1102,11 +1141,17 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) { if (!X86SelectAddress(Ptr, AM)) return false; + unsigned Alignment = LI->getAlignment(); + unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType()); + if (Alignment == 0) // Ensure that codegen never sees alignment 0 + Alignment = ABIAlignment; + unsigned ResultReg = 0; - if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg)) + if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg, + Alignment)) return false; - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } @@ -1128,27 +1173,37 @@ static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) { } } -/// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS -/// of the comparison, return an opcode that works for the compare (e.g. -/// CMP32ri) otherwise return 0. +/// If we have a comparison with RHS as the RHS of the comparison, return an +/// opcode that works for the compare (e.g. CMP32ri) otherwise return 0. static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) { + int64_t Val = RHSC->getSExtValue(); switch (VT.getSimpleVT().SimpleTy) { // Otherwise, we can't fold the immediate into this comparison. - default: return 0; - case MVT::i8: return X86::CMP8ri; - case MVT::i16: return X86::CMP16ri; - case MVT::i32: return X86::CMP32ri; + default: + return 0; + case MVT::i8: + return X86::CMP8ri; + case MVT::i16: + if (isInt<8>(Val)) + return X86::CMP16ri8; + return X86::CMP16ri; + case MVT::i32: + if (isInt<8>(Val)) + return X86::CMP32ri8; + return X86::CMP32ri; case MVT::i64: + if (isInt<8>(Val)) + return X86::CMP64ri8; // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext // field. - if ((int)RHSC->getSExtValue() == RHSC->getSExtValue()) + if (isInt<32>(Val)) return X86::CMP64ri32; return 0; } } bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, - EVT VT) { + EVT VT, DebugLoc CurDbgLoc) { unsigned Op0Reg = getRegForValue(Op0); if (Op0Reg == 0) return false; @@ -1161,7 +1216,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, // CMPri, otherwise use CMPrr. 
if (const ConstantInt *Op1C = dyn_cast(Op1)) { if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CompareImmOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareImmOpc)) .addReg(Op0Reg) .addImm(Op1C->getSExtValue()); return true; @@ -1173,7 +1228,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, unsigned Op1Reg = getRegForValue(Op1); if (Op1Reg == 0) return false; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CompareOpc)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurDbgLoc, TII.get(CompareOpc)) .addReg(Op0Reg) .addReg(Op1Reg); @@ -1196,7 +1251,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { ResultReg = createResultReg(&X86::GR32RegClass); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV32r0), ResultReg); - ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true, + ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, /*Kill=*/true, X86::sub_8bit); if (!ResultReg) return false; @@ -1211,7 +1266,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { } if (ResultReg) { - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } @@ -1241,7 +1296,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { ResultReg = createResultReg(&X86::GR8RegClass); if (SETFOpc) { - if (!X86FastEmitCompare(LHS, RHS, VT)) + if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc())) return false; unsigned FlagReg1 = createResultReg(&X86::GR8RegClass); @@ -1252,30 +1307,30 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) { FlagReg2); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]), ResultReg).addReg(FlagReg1).addReg(FlagReg2); - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } X86::CondCode CC; bool SwapArgs; - std::tie(CC, SwapArgs) = getX86ConditonCode(Predicate); - assert(CC <= X86::LAST_VALID_COND && "Unexpected conditon code."); + std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate); + assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); unsigned Opc = X86::getSETFromCond(CC); if (SwapArgs) std::swap(LHS, RHS); // Emit a compare of LHS/RHS. - if (!X86FastEmitCompare(LHS, RHS, VT)) + if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc())) return false; BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg); - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } bool X86FastISel::X86SelectZExt(const Instruction *I) { - EVT DstVT = TLI.getValueType(I->getType()); + EVT DstVT = TLI.getValueType(DL, I->getType()); if (!TLI.isTypeLegal(DstVT)) return false; @@ -1284,10 +1339,10 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) { return false; // Handle zero-extension from i1 to i8, which is common. - MVT SrcVT = TLI.getSimpleValueType(I->getOperand(0)->getType()); + MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType()); if (SrcVT.SimpleTy == MVT::i1) { // Set the high bits to zero. 
- ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false); + ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false); SrcVT = MVT::i8; if (ResultReg == 0) @@ -1314,17 +1369,16 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) { ResultReg) .addImm(0).addReg(Result32).addImm(X86::sub_32bit); } else if (DstVT != MVT::i8) { - ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND, + ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND, ResultReg, /*Kill=*/true); if (ResultReg == 0) return false; } - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } - bool X86FastISel::X86SelectBranch(const Instruction *I) { // Unconditional branches are selected by tablegen-generated code. // Handle a conditional branch. @@ -1335,16 +1389,17 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { // Fold the common case of a conditional branch with a comparison // in the same block (values defined on other blocks may not have // initialized registers). + X86::CondCode CC; if (const CmpInst *CI = dyn_cast(BI->getCondition())) { if (CI->hasOneUse() && CI->getParent() == I->getParent()) { - EVT VT = TLI.getValueType(CI->getOperand(0)->getType()); + EVT VT = TLI.getValueType(DL, CI->getOperand(0)->getType()); // Try to optimize or fold the cmp. CmpInst::Predicate Predicate = optimizeCmpPredicate(CI); switch (Predicate) { default: break; - case CmpInst::FCMP_FALSE: FastEmitBranch(FalseMBB, DbgLoc); return true; - case CmpInst::FCMP_TRUE: FastEmitBranch(TrueMBB, DbgLoc); return true; + case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, DbgLoc); return true; + case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, DbgLoc); return true; } const Value *CmpLHS = CI->getOperand(0); @@ -1366,9 +1421,9 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { Predicate = CmpInst::getInversePredicate(Predicate); } - // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/conditon + // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition // code check. Instead two branch instructions are required to check all - // the flags. First we change the predicate to a supported conditon code, + // the flags. First we change the predicate to a supported condition code, // which will be the first branch. Later one we will emit the second // branch. bool NeedExtraBranch = false; @@ -1382,18 +1437,17 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { break; } - X86::CondCode CC; bool SwapArgs; unsigned BranchOpc; - std::tie(CC, SwapArgs) = getX86ConditonCode(Predicate); - assert(CC <= X86::LAST_VALID_COND && "Unexpected conditon code."); + std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate); + assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); BranchOpc = X86::GetCondBranchFromCond(CC); if (SwapArgs) std::swap(CmpLHS, CmpRHS); // Emit a compare of the LHS and RHS, setting the flags. - if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT)) + if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc())) return false; BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc)) @@ -1402,21 +1456,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { // X86 requires a second branch to handle UNE (and OEQ, which is mapped // to UNE above). 
if (NeedExtraBranch) { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_4)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_1)) .addMBB(TrueMBB); } - // Obtain the branch weight and add the TrueBB to the successor list. - uint32_t BranchWeight = 0; - if (FuncInfo.BPI) - BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(), - TrueMBB->getBasicBlock()); - FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight); - - // Emits an unconditional branch to the FalseBB, obtains the branch - // weight, and adds it to the successor list. - FastEmitBranch(FalseMBB, DbgLoc); - + finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); return true; } } else if (TruncInst *TI = dyn_cast(BI->getCondition())) { @@ -1439,46 +1483,32 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc)) .addReg(OpReg).addImm(1); - unsigned JmpOpc = X86::JNE_4; + unsigned JmpOpc = X86::JNE_1; if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) { std::swap(TrueMBB, FalseMBB); - JmpOpc = X86::JE_4; + JmpOpc = X86::JE_1; } BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc)) .addMBB(TrueMBB); - FastEmitBranch(FalseMBB, DbgLoc); - uint32_t BranchWeight = 0; - if (FuncInfo.BPI) - BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(), - TrueMBB->getBasicBlock()); - FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight); + + finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); return true; } } - } else { - bool FoldIntrinsic; - X86::CondCode CC; - std::tie(FoldIntrinsic, CC) = foldX86XALUIntrinsic(BI, BI->getCondition()); - if (FoldIntrinsic) { - // Fake request the condition, otherwise the intrinsic might be completely - // optimized away. - unsigned TmpReg = getRegForValue(BI->getCondition()); - if (TmpReg == 0) - return false; + } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) { + // Fake request the condition, otherwise the intrinsic might be completely + // optimized away. + unsigned TmpReg = getRegForValue(BI->getCondition()); + if (TmpReg == 0) + return false; - unsigned BranchOpc = X86::GetCondBranchFromCond(CC); + unsigned BranchOpc = X86::GetCondBranchFromCond(CC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc)) - .addMBB(TrueMBB); - FastEmitBranch(FalseMBB, DbgLoc); - uint32_t BranchWeight = 0; - if (FuncInfo.BPI) - BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(), - TrueMBB->getBasicBlock()); - FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight); - return true; - } + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc)) + .addMBB(TrueMBB); + finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); + return true; } // Otherwise do a clumsy setcc and re-test it. 
@@ -1489,14 +1519,9 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri)) .addReg(OpReg).addImm(1); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_4)) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1)) .addMBB(TrueMBB); - FastEmitBranch(FalseMBB, DbgLoc); - uint32_t BranchWeight = 0; - if (FuncInfo.BPI) - BranchWeight = FuncInfo.BPI->getEdgeWeight(BI->getParent(), - TrueMBB->getBasicBlock()); - FuncInfo.MBB->addSuccessor(TrueMBB, BranchWeight); + finishCondBranch(BI->getParent(), TrueMBB, FalseMBB); return true; } @@ -1565,7 +1590,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) { unsigned ResultReg = createResultReg(RC); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg) .addReg(Op0Reg); - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } @@ -1677,8 +1702,8 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { TII.get(X86::MOV32r0), Zero32); // Copy the zero into the appropriate sub/super/identical physical - // register. Unfortunately the operations needed are not uniform enough to - // fit neatly into the table above. + // register. Unfortunately the operations needed are not uniform enough + // to fit neatly into the table above. if (VT.SimpleTy == MVT::i16) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), TypeEntry.HighInReg) @@ -1719,7 +1744,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { ResultSuperReg).addReg(SourceSuperReg).addImm(8); // Now reference the 8-bit subreg of the result. - ResultReg = FastEmitInst_extractsubreg(MVT::i8, ResultSuperReg, + ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg, /*Kill=*/true, X86::sub_8bit); } // Copy the result out of the physreg if we haven't already. @@ -1728,36 +1753,28 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg) .addReg(OpEntry.DivRemResultReg); } - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } /// \brief Emit a conditional move instruction (if the are supported) to lower /// the select. -bool X86FastISel::X86FastEmitCMoveSelect(const Instruction *I) { - MVT RetVT; - if (!isTypeLegal(I->getType(), RetVT)) - return false; - +bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) { // Check if the subtarget supports these instructions. if (!Subtarget->hasCMov()) return false; // FIXME: Add support for i8. - unsigned Opc; - switch (RetVT.SimpleTy) { - default: return false; - case MVT::i16: Opc = X86::CMOVNE16rr; break; - case MVT::i32: Opc = X86::CMOVNE32rr; break; - case MVT::i64: Opc = X86::CMOVNE64rr; break; - } + if (RetVT < MVT::i16 || RetVT > MVT::i64) + return false; const Value *Cond = I->getOperand(0); const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); bool NeedTest = true; + X86::CondCode CC = X86::COND_NE; - // Optimize conditons coming from a compare if both instructions are in the + // Optimize conditions coming from a compare if both instructions are in the // same basic block (values defined in other basic blocks may not have // initialized registers). 
const auto *CI = dyn_cast(Cond); @@ -1782,21 +1799,19 @@ bool X86FastISel::X86FastEmitCMoveSelect(const Instruction *I) { break; } - X86::CondCode CC; bool NeedSwap; - std::tie(CC, NeedSwap) = getX86ConditonCode(Predicate); + std::tie(CC, NeedSwap) = getX86ConditionCode(Predicate); assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code."); - Opc = X86::getCMovFromCond(CC, RC->getSize()); const Value *CmpLHS = CI->getOperand(0); const Value *CmpRHS = CI->getOperand(1); if (NeedSwap) std::swap(CmpLHS, CmpRHS); - EVT CmpVT = TLI.getValueType(CmpLHS->getType()); + EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType()); // Emit a compare of the LHS and RHS, setting the flags. - if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT)) - return false; + if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc())) + return false; if (SETFOpc) { unsigned FlagReg1 = createResultReg(&X86::GR8RegClass); @@ -1816,21 +1831,14 @@ bool X86FastISel::X86FastEmitCMoveSelect(const Instruction *I) { } } NeedTest = false; - } else { - bool FoldIntrinsic; - X86::CondCode CC; - std::tie(FoldIntrinsic, CC) = foldX86XALUIntrinsic(I, Cond); - - if (FoldIntrinsic) { - // Fake request the condition, otherwise the intrinsic might be completely - // optimized away. - unsigned TmpReg = getRegForValue(Cond); - if (TmpReg == 0) - return false; + } else if (foldX86XALUIntrinsic(CC, I, Cond)) { + // Fake request the condition, otherwise the intrinsic might be completely + // optimized away. + unsigned TmpReg = getRegForValue(Cond); + if (TmpReg == 0) + return false; - Opc = X86::getCMovFromCond(CC, RC->getSize()); - NeedTest = false; - } + NeedTest = false; } if (NeedTest) { @@ -1860,23 +1868,20 @@ bool X86FastISel::X86FastEmitCMoveSelect(const Instruction *I) { if (!LHSReg || !RHSReg) return false; - unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill, + unsigned Opc = X86::getCMovFromCond(CC, RC->getSize()); + unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill); - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } -/// \brief Emit SSE instructions to lower the select. +/// \brief Emit SSE or AVX instructions to lower the select. /// /// Try to use SSE1/SSE2 instructions to simulate a select without branches. /// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary -/// SSE instructions are available. -bool X86FastISel::X86FastEmitSSESelect(const Instruction *I) { - MVT RetVT; - if (!isTypeLegal(I->getType(), RetVT)) - return false; - - // Optimize conditons coming from a compare if both instructions are in the +/// SSE instructions are available. If AVX is available, try to use a VBLENDV. +bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) { + // Optimize conditions coming from a compare if both instructions are in the // same basic block (values defined in other basic blocks may not have // initialized registers). 
const auto *CI = dyn_cast(I->getOperand(0)); @@ -1885,7 +1890,7 @@ bool X86FastISel::X86FastEmitSSESelect(const Instruction *I) { if (I->getType() != CI->getOperand(0)->getType() || !((Subtarget->hasSSE1() && RetVT == MVT::f32) || - (Subtarget->hasSSE2() && RetVT == MVT::f64) )) + (Subtarget->hasSSE2() && RetVT == MVT::f64))) return false; const Value *CmpLHS = CI->getOperand(0); @@ -1903,26 +1908,24 @@ bool X86FastISel::X86FastEmitSSESelect(const Instruction *I) { unsigned CC; bool NeedSwap; - std::tie(CC, NeedSwap) = getX86SSECondtionCode(Predicate); + std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate); if (CC > 7) return false; if (NeedSwap) std::swap(CmpLHS, CmpRHS); - static unsigned OpcTable[2][2][4] = { - { { X86::CMPSSrr, X86::FsANDPSrr, X86::FsANDNPSrr, X86::FsORPSrr }, - { X86::VCMPSSrr, X86::VFsANDPSrr, X86::VFsANDNPSrr, X86::VFsORPSrr } }, - { { X86::CMPSDrr, X86::FsANDPDrr, X86::FsANDNPDrr, X86::FsORPDrr }, - { X86::VCMPSDrr, X86::VFsANDPDrr, X86::VFsANDNPDrr, X86::VFsORPDrr } } + // Choose the SSE instruction sequence based on data type (float or double). + static unsigned OpcTable[2][4] = { + { X86::CMPSSrr, X86::FsANDPSrr, X86::FsANDNPSrr, X86::FsORPSrr }, + { X86::CMPSDrr, X86::FsANDPDrr, X86::FsANDNPDrr, X86::FsORPDrr } }; - bool HasAVX = Subtarget->hasAVX(); unsigned *Opc = nullptr; switch (RetVT.SimpleTy) { default: return false; - case MVT::f32: Opc = &OpcTable[0][HasAVX][0]; break; - case MVT::f64: Opc = &OpcTable[1][HasAVX][0]; break; + case MVT::f32: Opc = &OpcTable[0][0]; break; + case MVT::f64: Opc = &OpcTable[1][0]; break; } const Value *LHS = I->getOperand(1); @@ -1944,23 +1947,44 @@ bool X86FastISel::X86FastEmitSSESelect(const Instruction *I) { return false; const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); - unsigned CmpReg = FastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill, - CmpRHSReg, CmpRHSIsKill, CC); - unsigned AndReg = FastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false, - LHSReg, LHSIsKill); - unsigned AndNReg = FastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true, - RHSReg, RHSIsKill); - unsigned ResultReg = FastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true, - AndReg, /*IsKill=*/true); - UpdateValueMap(I, ResultReg); + unsigned ResultReg; + + if (Subtarget->hasAVX()) { + const TargetRegisterClass *FR32 = &X86::FR32RegClass; + const TargetRegisterClass *VR128 = &X86::VR128RegClass; + + // If we have AVX, create 1 blendv instead of 3 logic instructions. + // Blendv was introduced with SSE 4.1, but the 2 register form implicitly + // uses XMM0 as the selection register. That may need just as many + // instructions as the AND/ANDN/OR sequence due to register moves, so + // don't bother. + unsigned CmpOpcode = + (RetVT.SimpleTy == MVT::f32) ? X86::VCMPSSrr : X86::VCMPSDrr; + unsigned BlendOpcode = + (RetVT.SimpleTy == MVT::f32) ? 
X86::VBLENDVPSrr : X86::VBLENDVPDrr; + + unsigned CmpReg = fastEmitInst_rri(CmpOpcode, FR32, CmpLHSReg, CmpLHSIsKill, + CmpRHSReg, CmpRHSIsKill, CC); + unsigned VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, RHSIsKill, + LHSReg, LHSIsKill, CmpReg, true); + ResultReg = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg); + } else { + unsigned CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpLHSIsKill, + CmpRHSReg, CmpRHSIsKill, CC); + unsigned AndReg = fastEmitInst_rr(Opc[1], RC, CmpReg, /*IsKill=*/false, + LHSReg, LHSIsKill); + unsigned AndNReg = fastEmitInst_rr(Opc[2], RC, CmpReg, /*IsKill=*/true, + RHSReg, RHSIsKill); + ResultReg = fastEmitInst_rr(Opc[3], RC, AndNReg, /*IsKill=*/true, + AndReg, /*IsKill=*/true); + } + updateValueMap(I, ResultReg); return true; } -bool X86FastISel::X86FastEmitPseudoSelect(const Instruction *I) { - MVT RetVT; - if (!isTypeLegal(I->getType(), RetVT)) - return false; - +bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) { // These are pseudo CMOV instructions and will be later expanded into control- // flow. unsigned Opc; @@ -1976,13 +2000,13 @@ bool X86FastISel::X86FastEmitPseudoSelect(const Instruction *I) { const Value *Cond = I->getOperand(0); X86::CondCode CC = X86::COND_NE; - // Optimize conditons coming from a compare if both instructions are in the + // Optimize conditions coming from a compare if both instructions are in the // same basic block (values defined in other basic blocks may not have // initialized registers). const auto *CI = dyn_cast(Cond); if (CI && (CI->getParent() == I->getParent())) { bool NeedSwap; - std::tie(CC, NeedSwap) = getX86ConditonCode(CI->getPredicate()); + std::tie(CC, NeedSwap) = getX86ConditionCode(CI->getPredicate()); if (CC > X86::LAST_VALID_COND) return false; @@ -1992,8 +2016,8 @@ bool X86FastISel::X86FastEmitPseudoSelect(const Instruction *I) { if (NeedSwap) std::swap(CmpLHS, CmpRHS); - EVT CmpVT = TLI.getValueType(CmpLHS->getType()); - if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT)) + EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType()); + if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc())) return false; } else { unsigned CondReg = getRegForValue(Cond); @@ -2019,8 +2043,8 @@ bool X86FastISel::X86FastEmitPseudoSelect(const Instruction *I) { const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT); unsigned ResultReg = - FastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC); - UpdateValueMap(I, ResultReg); + fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill, CC); + updateValueMap(I, ResultReg); return true; } @@ -2049,70 +2073,113 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(OpReg, getKillRegState(OpIsKill)); - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } } // First try to use real conditional move instructions. - if (X86FastEmitCMoveSelect(I)) + if (X86FastEmitCMoveSelect(RetVT, I)) return true; - // Try to use a sequence of SSE instructions to simulate a conditonal move. - if (X86FastEmitSSESelect(I)) + // Try to use a sequence of SSE instructions to simulate a conditional move. + if (X86FastEmitSSESelect(RetVT, I)) return true; // Fall-back to pseudo conditional move instructions, which will be later // converted to control-flow. 
- if (X86FastEmitPseudoSelect(I)) + if (X86FastEmitPseudoSelect(RetVT, I)) return true; return false; } +bool X86FastISel::X86SelectSIToFP(const Instruction *I) { + // The target-independent selection algorithm in FastISel already knows how + // to select a SINT_TO_FP if the target is SSE but not AVX. + // Early exit if the subtarget doesn't have AVX. + if (!Subtarget->hasAVX()) + return false; + + if (!I->getOperand(0)->getType()->isIntegerTy(32)) + return false; + + // Select integer to float/double conversion. + unsigned OpReg = getRegForValue(I->getOperand(0)); + if (OpReg == 0) + return false; + + const TargetRegisterClass *RC = nullptr; + unsigned Opcode; + + if (I->getType()->isDoubleTy()) { + // sitofp int -> double + Opcode = X86::VCVTSI2SDrr; + RC = &X86::FR64RegClass; + } else if (I->getType()->isFloatTy()) { + // sitofp int -> float + Opcode = X86::VCVTSI2SSrr; + RC = &X86::FR32RegClass; + } else + return false; + + unsigned ImplicitDefReg = createResultReg(RC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg); + unsigned ResultReg = + fastEmitInst_rr(Opcode, RC, ImplicitDefReg, true, OpReg, false); + updateValueMap(I, ResultReg); + return true; +} + +// Helper method used by X86SelectFPExt and X86SelectFPTrunc. +bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I, + unsigned TargetOpc, + const TargetRegisterClass *RC) { + assert((I->getOpcode() == Instruction::FPExt || + I->getOpcode() == Instruction::FPTrunc) && + "Instruction must be an FPExt or FPTrunc!"); + + unsigned OpReg = getRegForValue(I->getOperand(0)); + if (OpReg == 0) + return false; + + unsigned ResultReg = createResultReg(RC); + MachineInstrBuilder MIB; + MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpc), + ResultReg); + if (Subtarget->hasAVX()) + MIB.addReg(OpReg); + MIB.addReg(OpReg); + updateValueMap(I, ResultReg); + return true; +} + bool X86FastISel::X86SelectFPExt(const Instruction *I) { - // fpext from float to double. - if (X86ScalarSSEf64 && - I->getType()->isDoubleTy()) { - const Value *V = I->getOperand(0); - if (V->getType()->isFloatTy()) { - unsigned OpReg = getRegForValue(V); - if (OpReg == 0) return false; - unsigned ResultReg = createResultReg(&X86::FR64RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(X86::CVTSS2SDrr), ResultReg) - .addReg(OpReg); - UpdateValueMap(I, ResultReg); - return true; - } + if (X86ScalarSSEf64 && I->getType()->isDoubleTy() && + I->getOperand(0)->getType()->isFloatTy()) { + // fpext from float to double. + unsigned Opc = Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr; + return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR64RegClass); } return false; } bool X86FastISel::X86SelectFPTrunc(const Instruction *I) { - if (X86ScalarSSEf64) { - if (I->getType()->isFloatTy()) { - const Value *V = I->getOperand(0); - if (V->getType()->isDoubleTy()) { - unsigned OpReg = getRegForValue(V); - if (OpReg == 0) return false; - unsigned ResultReg = createResultReg(&X86::FR32RegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(X86::CVTSD2SSrr), ResultReg) - .addReg(OpReg); - UpdateValueMap(I, ResultReg); - return true; - } - } + if (X86ScalarSSEf64 && I->getType()->isFloatTy() && + I->getOperand(0)->getType()->isDoubleTy()) { + // fptrunc from double to float. + unsigned Opc = Subtarget->hasAVX() ? 
X86::VCVTSD2SSrr : X86::CVTSD2SSrr; + return X86SelectFPExtOrFPTrunc(I, Opc, &X86::FR32RegClass); } return false; } bool X86FastISel::X86SelectTrunc(const Instruction *I) { - EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); - EVT DstVT = TLI.getValueType(I->getType()); + EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType()); + EVT DstVT = TLI.getValueType(DL, I->getType()); // This code only handles truncation to byte. if (DstVT != MVT::i8 && DstVT != MVT::i1) @@ -2127,30 +2194,31 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) { if (SrcVT == MVT::i8) { // Truncate from i8 to i1; no code needed. - UpdateValueMap(I, InputReg); + updateValueMap(I, InputReg); return true; } + bool KillInputReg = false; if (!Subtarget->is64Bit()) { // If we're on x86-32; we can't extract an i8 from a general register. // First issue a copy to GR16_ABCD or GR32_ABCD. - const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ? - (const TargetRegisterClass*)&X86::GR16_ABCDRegClass : - (const TargetRegisterClass*)&X86::GR32_ABCDRegClass; + const TargetRegisterClass *CopyRC = + (SrcVT == MVT::i16) ? &X86::GR16_ABCDRegClass : &X86::GR32_ABCDRegClass; unsigned CopyReg = createResultReg(CopyRC); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), - CopyReg).addReg(InputReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), CopyReg).addReg(InputReg); InputReg = CopyReg; + KillInputReg = true; } // Issue an extract_subreg. - unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8, - InputReg, /*Kill=*/true, + unsigned ResultReg = fastEmitInst_extractsubreg(MVT::i8, + InputReg, KillInputReg, X86::sub_8bit); if (!ResultReg) return false; - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } @@ -2176,9 +2244,8 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM, VT = MVT::i32; else if (Len >= 2) VT = MVT::i16; - else { + else VT = MVT::i8; - } unsigned Reg; bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg); @@ -2194,24 +2261,78 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM, return true; } -static bool isCommutativeIntrinsic(IntrinsicInst const &I) { - switch (I.getIntrinsicID()) { - case Intrinsic::sadd_with_overflow: - case Intrinsic::uadd_with_overflow: - case Intrinsic::smul_with_overflow: - case Intrinsic::umul_with_overflow: - return true; - default: - return false; - } -} - -bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { +bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) { // FIXME: Handle more intrinsics. - switch (I.getIntrinsicID()) { + switch (II->getIntrinsicID()) { default: return false; + case Intrinsic::convert_from_fp16: + case Intrinsic::convert_to_fp16: { + if (Subtarget->useSoftFloat() || !Subtarget->hasF16C()) + return false; + + const Value *Op = II->getArgOperand(0); + unsigned InputReg = getRegForValue(Op); + if (InputReg == 0) + return false; + + // F16C only allows converting from float to half and from half to float. + bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16; + if (IsFloatToHalf) { + if (!Op->getType()->isFloatTy()) + return false; + } else { + if (!II->getType()->isFloatTy()) + return false; + } + + unsigned ResultReg = 0; + const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16); + if (IsFloatToHalf) { + // 'InputReg' is implicitly promoted from register class FR32 to + // register class VR128 by method 'constrainOperandRegClass' which is + // directly called by 'fastEmitInst_ri'. 
+ // Instruction VCVTPS2PHrr takes an extra immediate operand which is + // used to provide rounding control. + InputReg = fastEmitInst_ri(X86::VCVTPS2PHrr, RC, InputReg, false, 0); + + // Move the lower 32-bits of ResultReg to another register of class GR32. + ResultReg = createResultReg(&X86::GR32RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(X86::VMOVPDI2DIrr), ResultReg) + .addReg(InputReg, RegState::Kill); + + // The result value is in the lower 16-bits of ResultReg. + unsigned RegIdx = X86::sub_16bit; + ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, true, RegIdx); + } else { + assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!"); + // Explicitly sign-extend the input to 32-bit. + InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::SIGN_EXTEND, InputReg, + /*Kill=*/false); + + // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr. + InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR, + InputReg, /*Kill=*/true); + + InputReg = fastEmitInst_r(X86::VCVTPH2PSrr, RC, InputReg, /*Kill=*/true); + + // The result value is in the lower 32-bits of ResultReg. + // Emit an explicit copy from register class VR128 to register class FR32. + ResultReg = createResultReg(&X86::FR32RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg) + .addReg(InputReg, RegState::Kill); + } + + updateValueMap(II, ResultReg); + return true; + } case Intrinsic::frameaddress: { - Type *RetTy = I.getCalledFunction()->getReturnType(); + MachineFunction *MF = FuncInfo.MF; + if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI()) + return false; + + Type *RetTy = II->getCalledFunction()->getReturnType(); MVT VT; if (!isTypeLegal(RetTy, VT)) @@ -2226,14 +2347,13 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break; } - // This needs to be set before we call getFrameRegister, otherwise we get - // the wrong frame register. - MachineFrameInfo *MFI = FuncInfo.MF->getFrameInfo(); + // This needs to be set before we call getPtrSizedFrameRegister, otherwise + // we get the wrong frame register. + MachineFrameInfo *MFI = MF->getFrameInfo(); MFI->setFrameAddressIsTaken(true); - const X86RegisterInfo *RegInfo = - static_cast(TM.getRegisterInfo()); - unsigned FrameReg = RegInfo->getFrameRegister(*(FuncInfo.MF)); + const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo(); + unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF); assert(((FrameReg == X86::RBP && VT == MVT::i64) || (FrameReg == X86::EBP && VT == MVT::i32)) && "Invalid Frame Register!"); @@ -2251,7 +2371,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { // movq (%rax), %rax // ... unsigned DestReg; - unsigned Depth = cast(I.getOperand(0))->getZExtValue(); + unsigned Depth = cast(II->getOperand(0))->getZExtValue(); while (Depth--) { DestReg = createResultReg(RC); addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, @@ -2259,23 +2379,23 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { SrcReg = DestReg; } - UpdateValueMap(&I, SrcReg); + updateValueMap(II, SrcReg); return true; } case Intrinsic::memcpy: { - const MemCpyInst &MCI = cast(I); + const MemCpyInst *MCI = cast(II); // Don't handle volatile or variable length memcpys. 
- if (MCI.isVolatile()) + if (MCI->isVolatile()) return false; - if (isa(MCI.getLength())) { + if (isa(MCI->getLength())) { // Small memcpy's are common enough that we want to do them // without a call if possible. - uint64_t Len = cast(MCI.getLength())->getZExtValue(); + uint64_t Len = cast(MCI->getLength())->getZExtValue(); if (IsMemcpySmall(Len)) { X86AddressMode DestAM, SrcAM; - if (!X86SelectAddress(MCI.getRawDest(), DestAM) || - !X86SelectAddress(MCI.getRawSource(), SrcAM)) + if (!X86SelectAddress(MCI->getRawDest(), DestAM) || + !X86SelectAddress(MCI->getRawSource(), SrcAM)) return false; TryEmitSmallMemcpy(DestAM, SrcAM, Len); return true; @@ -2283,35 +2403,35 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { } unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32; - if (!MCI.getLength()->getType()->isIntegerTy(SizeWidth)) + if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth)) return false; - if (MCI.getSourceAddressSpace() > 255 || MCI.getDestAddressSpace() > 255) + if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255) return false; - return DoSelectCall(&I, "memcpy"); + return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2); } case Intrinsic::memset: { - const MemSetInst &MSI = cast(I); + const MemSetInst *MSI = cast(II); - if (MSI.isVolatile()) + if (MSI->isVolatile()) return false; unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32; - if (!MSI.getLength()->getType()->isIntegerTy(SizeWidth)) + if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth)) return false; - if (MSI.getDestAddressSpace() > 255) + if (MSI->getDestAddressSpace() > 255) return false; - return DoSelectCall(&I, "memset"); + return lowerCallTo(II, "memset", II->getNumArgOperands() - 2); } case Intrinsic::stackprotector: { // Emit code to store the stack guard onto the stack. - EVT PtrTy = TLI.getPointerTy(); + EVT PtrTy = TLI.getPointerTy(DL); - const Value *Op1 = I.getArgOperand(0); // The guard's value. - const AllocaInst *Slot = cast(I.getArgOperand(1)); + const Value *Op1 = II->getArgOperand(0); // The guard's value. + const AllocaInst *Slot = cast(II->getArgOperand(1)); MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]); @@ -2322,7 +2442,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { return true; } case Intrinsic::dbg_declare: { - const DbgDeclareInst *DI = cast(&I); + const DbgDeclareInst *DI = cast(II); X86AddressMode AM; assert(DI->getAddress() && "Null address should be checked earlier!"); if (!X86SelectAddress(DI->getAddress(), AM)) @@ -2330,8 +2450,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); // FIXME may need to add RegState::Debug to any registers produced, // although ESP/EBP should be the only ones at the moment. - addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM). 
- addImm(0).addMetadata(DI->getVariable()); + assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) && + "Expected inlined-at fields to agree"); + addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM) + .addImm(0) + .addMetadata(DI->getVariable()) + .addMetadata(DI->getExpression()); return true; } case Intrinsic::trap: { @@ -2342,13 +2466,13 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { if (!Subtarget->hasSSE1()) return false; - Type *RetTy = I.getCalledFunction()->getReturnType(); + Type *RetTy = II->getCalledFunction()->getReturnType(); MVT VT; if (!isTypeLegal(RetTy, VT)) return false; - // Unfortunatelly we can't use FastEmit_r, because the AVX version of FSQRT + // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT // is not generated by FastISel yet. // FIXME: Update this code once tablegen can handle it. static const unsigned SqrtOpc[2][2] = { @@ -2364,7 +2488,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { case MVT::f64: Opc = SqrtOpc[1][HasAVX]; RC = &X86::FR64RegClass; break; } - const Value *SrcVal = I.getArgOperand(0); + const Value *SrcVal = II->getArgOperand(0); unsigned SrcReg = getRegForValue(SrcVal); if (SrcReg == 0) @@ -2387,7 +2511,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { MIB.addReg(SrcReg); - UpdateValueMap(&I, ResultReg); + updateValueMap(II, ResultReg); return true; } case Intrinsic::sadd_with_overflow: @@ -2397,8 +2521,8 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { case Intrinsic::smul_with_overflow: case Intrinsic::umul_with_overflow: { // This implements the basic lowering of the xalu with overflow intrinsics - // into add/sub/mul folowed by either seto or setb. - const Function *Callee = I.getCalledFunction(); + // into add/sub/mul followed by either seto or setb. + const Function *Callee = II->getCalledFunction(); auto *Ty = cast(Callee->getReturnType()); Type *RetTy = Ty->getTypeAtIndex(0U); Type *CondTy = Ty->getTypeAtIndex(1); @@ -2410,27 +2534,35 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { if (VT < MVT::i8 || VT > MVT::i64) return false; - const Value *LHS = I.getArgOperand(0); - const Value *RHS = I.getArgOperand(1); + const Value *LHS = II->getArgOperand(0); + const Value *RHS = II->getArgOperand(1); - // Canonicalize immediates to the RHS. + // Canonicalize immediate to the RHS. if (isa(LHS) && !isa(RHS) && - isCommutativeIntrinsic(I)) + isCommutativeIntrinsic(II)) std::swap(LHS, RHS); + bool UseIncDec = false; + if (isa(RHS) && cast(RHS)->isOne()) + UseIncDec = true; + unsigned BaseOpc, CondOpc; - switch (I.getIntrinsicID()) { + switch (II->getIntrinsicID()) { default: llvm_unreachable("Unexpected intrinsic!"); case Intrinsic::sadd_with_overflow: - BaseOpc = ISD::ADD; CondOpc = X86::SETOr; break; + BaseOpc = UseIncDec ? unsigned(X86ISD::INC) : unsigned(ISD::ADD); + CondOpc = X86::SETOr; + break; case Intrinsic::uadd_with_overflow: BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break; case Intrinsic::ssub_with_overflow: - BaseOpc = ISD::SUB; CondOpc = X86::SETOr; break; + BaseOpc = UseIncDec ? 
unsigned(X86ISD::DEC) : unsigned(ISD::SUB); + CondOpc = X86::SETOr; + break; case Intrinsic::usub_with_overflow: BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break; case Intrinsic::smul_with_overflow: - BaseOpc = ISD::MUL; CondOpc = X86::SETOr; break; + BaseOpc = X86ISD::SMUL; CondOpc = X86::SETOr; break; case Intrinsic::umul_with_overflow: BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break; } @@ -2442,9 +2574,21 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { unsigned ResultReg = 0; // Check if we have an immediate version. - if (auto const *C = dyn_cast(RHS)) { - ResultReg = FastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill, - C->getZExtValue()); + if (const auto *CI = dyn_cast(RHS)) { + static const unsigned Opc[2][4] = { + { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r }, + { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r } + }; + + if (BaseOpc == X86ISD::INC || BaseOpc == X86ISD::DEC) { + ResultReg = createResultReg(TLI.getRegClassFor(VT)); + bool IsDec = BaseOpc == X86ISD::DEC; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg) + .addReg(LHSReg, getKillRegState(LHSIsKill)); + } else + ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, LHSIsKill, + CI->getZExtValue()); } unsigned RHSReg; @@ -2454,22 +2598,38 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { if (RHSReg == 0) return false; RHSIsKill = hasTrivialKill(RHS); - ResultReg = FastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg, + ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, LHSIsKill, RHSReg, RHSIsKill); } - // FastISel doesn't have a pattern for X86::MUL*r. Emit it manually. + // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit + // it manually. if (BaseOpc == X86ISD::UMUL && !ResultReg) { static const unsigned MULOpc[] = - { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r }; + { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r }; static const unsigned Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX }; // First copy the first operand into RAX, which is an implicit input to // the X86::MUL*r instruction. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8]) .addReg(LHSReg, getKillRegState(LHSIsKill)); - ResultReg = FastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8], + ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8], TLI.getRegClassFor(VT), RHSReg, RHSIsKill); + } else if (BaseOpc == X86ISD::SMUL && !ResultReg) { + static const unsigned MULOpc[] = + { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr }; + if (VT == MVT::i8) { + // Copy the first operand into AL, which is an implicit input to the + // X86::IMUL8r instruction. 
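// Background for the copy just below: unlike the 16/32/64-bit forms, 8-bit
// IMUL has no two-register encoding; IMUL r/m8 multiplies by AL and writes the
// full product to AX, so the LHS must be staged in AL first. At the source
// level, this *.with.overflow path corresponds to code such as the following
// (illustrative example assuming Clang/GCC overflow builtins; not part of the
// patch):
#include <cstdint>
#include <cstdio>

int main() {
  int8_t Prod;
  bool Overflow = __builtin_mul_overflow(int8_t(100), int8_t(2), &Prod);
  std::printf("prod=%d overflow=%d\n", Prod, Overflow); // 100*2 = 200 > 127
  return 0;
}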
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), X86::AL) + .addReg(LHSReg, getKillRegState(LHSIsKill)); + ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg, + RHSIsKill); + } else + ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8], + TLI.getRegClassFor(VT), LHSReg, LHSIsKill, + RHSReg, RHSIsKill); } if (!ResultReg) @@ -2480,7 +2640,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc), ResultReg2); - UpdateValueMap(&I, ResultReg, 2); + updateValueMap(II, ResultReg, 2); return true; } case Intrinsic::x86_sse_cvttss2si: @@ -2488,7 +2648,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { case Intrinsic::x86_sse2_cvttsd2si: case Intrinsic::x86_sse2_cvttsd2si64: { bool IsInputDouble; - switch (I.getIntrinsicID()) { + switch (II->getIntrinsicID()) { default: llvm_unreachable("Unexpected intrinsic."); case Intrinsic::x86_sse_cvttss2si: case Intrinsic::x86_sse_cvttss2si64: @@ -2504,7 +2664,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { break; } - Type *RetTy = I.getCalledFunction()->getReturnType(); + Type *RetTy = II->getCalledFunction()->getReturnType(); MVT VT; if (!isTypeLegal(RetTy, VT)) return false; @@ -2524,7 +2684,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { } // Check if we can fold insertelement instructions into the convert. - const Value *Op = I.getArgOperand(0); + const Value *Op = II->getArgOperand(0); while (auto *IE = dyn_cast(Op)) { const Value *Index = IE->getOperand(2); if (!isa(Index)) @@ -2546,13 +2706,13 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg) .addReg(Reg); - UpdateValueMap(&I, ResultReg); + updateValueMap(II, ResultReg); return true; } } } -bool X86FastISel::FastLowerArguments() { +bool X86FastISel::fastLowerArguments() { if (!FuncInfo.CanLowerReturn) return false; @@ -2569,7 +2729,7 @@ bool X86FastISel::FastLowerArguments() { if (!Subtarget->is64Bit()) return false; - + // Only handle simple cases. i.e. Up to 6 i32/i64 scalar arguments. unsigned GPRCnt = 0; unsigned FPRCnt = 0; @@ -2587,7 +2747,7 @@ bool X86FastISel::FastLowerArguments() { if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) return false; - EVT ArgVT = TLI.getValueType(ArgTy); + EVT ArgVT = TLI.getValueType(DL, ArgTy); if (!ArgVT.isSimple()) return false; switch (ArgVT.getSimpleVT().SimpleTy) { default: return false; @@ -2624,7 +2784,7 @@ bool X86FastISel::FastLowerArguments() { unsigned GPRIdx = 0; unsigned FPRIdx = 0; for (auto const &Arg : F->args()) { - MVT VT = TLI.getSimpleValueType(Arg.getType()); + MVT VT = TLI.getSimpleValueType(DL, Arg.getType()); const TargetRegisterClass *RC = TLI.getRegClassFor(VT); unsigned SrcReg; switch (VT.SimpleTy) { @@ -2642,58 +2802,57 @@ bool X86FastISel::FastLowerArguments() { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), ResultReg) .addReg(DstReg, getKillRegState(true)); - UpdateValueMap(&Arg, ResultReg); + updateValueMap(&Arg, ResultReg); } return true; } -bool X86FastISel::X86SelectCall(const Instruction *I) { - const CallInst *CI = cast(I); - const Value *Callee = CI->getCalledValue(); - - // Can't handle inline asm yet. - if (isa(Callee)) - return false; - - // Handle intrinsic calls. 
- if (const IntrinsicInst *II = dyn_cast(CI)) - return X86VisitIntrinsicCall(*II); - - // Allow SelectionDAG isel to handle tail calls. - if (cast(I)->isTailCall()) - return false; - - return DoSelectCall(I, nullptr); -} - -static unsigned computeBytesPoppedByCallee(const X86Subtarget &Subtarget, - const ImmutableCallSite &CS) { - if (Subtarget.is64Bit()) +static unsigned computeBytesPoppedByCallee(const X86Subtarget *Subtarget, + CallingConv::ID CC, + ImmutableCallSite *CS) { + if (Subtarget->is64Bit()) return 0; - if (Subtarget.getTargetTriple().isOSMSVCRT()) + if (Subtarget->getTargetTriple().isOSMSVCRT()) return 0; - CallingConv::ID CC = CS.getCallingConv(); - if (CC == CallingConv::Fast || CC == CallingConv::GHC) + if (CC == CallingConv::Fast || CC == CallingConv::GHC || + CC == CallingConv::HiPE) return 0; - if (!CS.paramHasAttr(1, Attribute::StructRet)) + if (CS && !CS->paramHasAttr(1, Attribute::StructRet)) return 0; - if (CS.paramHasAttr(1, Attribute::InReg)) + if (CS && CS->paramHasAttr(1, Attribute::InReg)) return 0; return 4; } -// Select either a call, or an llvm.memcpy/memmove/memset intrinsic -bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { - const CallInst *CI = cast(I); - const Value *Callee = CI->getCalledValue(); - - // Handle only C and fastcc calling conventions for now. - ImmutableCallSite CS(CI); - CallingConv::ID CC = CS.getCallingConv(); - bool isWin64 = Subtarget->isCallingConvWin64(CC); - if (CC != CallingConv::C && CC != CallingConv::Fast && - CC != CallingConv::X86_FastCall && CC != CallingConv::X86_64_Win64 && - CC != CallingConv::X86_64_SysV) +bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) { + auto &OutVals = CLI.OutVals; + auto &OutFlags = CLI.OutFlags; + auto &OutRegs = CLI.OutRegs; + auto &Ins = CLI.Ins; + auto &InRegs = CLI.InRegs; + CallingConv::ID CC = CLI.CallConv; + bool &IsTailCall = CLI.IsTailCall; + bool IsVarArg = CLI.IsVarArg; + const Value *Callee = CLI.Callee; + MCSymbol *Symbol = CLI.Symbol; + + bool Is64Bit = Subtarget->is64Bit(); + bool IsWin64 = Subtarget->isCallingConvWin64(CC); + + // Handle only C, fastcc, and webkit_js calling conventions for now. + switch (CC) { + default: return false; + case CallingConv::C: + case CallingConv::Fast: + case CallingConv::WebKit_JS: + case CallingConv::X86_FastCall: + case CallingConv::X86_64_Win64: + case CallingConv::X86_64_SysV: + break; + } + + // Allow SelectionDAG isel to handle tail calls. + if (IsTailCall) return false; // fastcc with -tailcallopt is intended to provide a guaranteed @@ -2701,166 +2860,99 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) return false; - PointerType *PT = cast(CS.getCalledValue()->getType()); - FunctionType *FTy = cast(PT->getElementType()); - bool isVarArg = FTy->isVarArg(); - // Don't know how to handle Win64 varargs yet. Nothing special needed for - // x86-32. Special handling for x86-64 is implemented. - if (isVarArg && isWin64) + // x86-32. Special handling for x86-64 is implemented. + if (IsVarArg && IsWin64) return false; // Don't know about inalloca yet. - if (CS.hasInAllocaArgument()) + if (CLI.CS && CLI.CS->hasInAllocaArgument()) return false; // Fast-isel doesn't know about callee-pop yet. - if (X86::isCalleePop(CC, Subtarget->is64Bit(), isVarArg, + if (X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg, TM.Options.GuaranteedTailCallOpt)) return false; - // Check whether the function can return without sret-demotion. 
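// "sret demotion" above refers to returning an aggregate through a hidden
// pointer argument when it does not fit in the return registers; the old code
// bailed out of fast-isel for such calls, and with fastLowerCall the
// equivalent check now lives in the shared FastISel call-lowering path.
// Illustrative C++ only, not part of the patch:
struct Big { long A, B, C, D; };      // 32 bytes: too large for the RDX:RAX pair
Big makeBig() { return {1, 2, 3, 4}; }
long useBig() { return makeBig().A; } // the caller passes a hidden Big* (sret)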
- SmallVector Outs; - GetReturnInfo(I->getType(), CS.getAttributes(), Outs, TLI); - bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(), - *FuncInfo.MF, FTy->isVarArg(), - Outs, FTy->getContext()); - if (!CanLowerReturn) - return false; + SmallVector OutVTs; + SmallVector ArgRegs; - // Materialize callee address in a register. FIXME: GV address can be - // handled with a CALLpcrel32 instead. - X86AddressMode CalleeAM; - if (!X86SelectCallAddress(Callee, CalleeAM)) - return false; - unsigned CalleeOp = 0; - const GlobalValue *GV = nullptr; - if (CalleeAM.GV != nullptr) { - GV = CalleeAM.GV; - } else if (CalleeAM.Base.Reg != 0) { - CalleeOp = CalleeAM.Base.Reg; - } else - return false; - - // Deal with call operands first. - SmallVector ArgVals; - SmallVector Args; - SmallVector ArgVTs; - SmallVector ArgFlags; - unsigned arg_size = CS.arg_size(); - Args.reserve(arg_size); - ArgVals.reserve(arg_size); - ArgVTs.reserve(arg_size); - ArgFlags.reserve(arg_size); - for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); - i != e; ++i) { - // If we're lowering a mem intrinsic instead of a regular call, skip the - // last two arguments, which should not passed to the underlying functions. - if (MemIntName && e-i <= 2) - break; - Value *ArgVal = *i; - ISD::ArgFlagsTy Flags; - unsigned AttrInd = i - CS.arg_begin() + 1; - if (CS.paramHasAttr(AttrInd, Attribute::SExt)) - Flags.setSExt(); - if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) - Flags.setZExt(); - - if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) { - PointerType *Ty = cast(ArgVal->getType()); - Type *ElementTy = Ty->getElementType(); - unsigned FrameSize = DL.getTypeAllocSize(ElementTy); - unsigned FrameAlign = CS.getParamAlignment(AttrInd); - if (!FrameAlign) - FrameAlign = TLI.getByValTypeAlignment(ElementTy); - Flags.setByVal(); - Flags.setByValSize(FrameSize); - Flags.setByValAlign(FrameAlign); - if (!IsMemcpySmall(FrameSize)) - return false; - } - - if (CS.paramHasAttr(AttrInd, Attribute::InReg)) - Flags.setInReg(); - if (CS.paramHasAttr(AttrInd, Attribute::Nest)) - Flags.setNest(); - - // If this is an i1/i8/i16 argument, promote to i32 to avoid an extra - // instruction. This is safe because it is common to all fastisel supported - // calling conventions on x86. - if (ConstantInt *CI = dyn_cast(ArgVal)) { - if (CI->getBitWidth() == 1 || CI->getBitWidth() == 8 || - CI->getBitWidth() == 16) { + // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra + // instruction. This is safe because it is common to all FastISel supported + // calling conventions on x86. + for (int i = 0, e = OutVals.size(); i != e; ++i) { + Value *&Val = OutVals[i]; + ISD::ArgFlagsTy Flags = OutFlags[i]; + if (auto *CI = dyn_cast(Val)) { + if (CI->getBitWidth() < 32) { if (Flags.isSExt()) - ArgVal = ConstantExpr::getSExt(CI,Type::getInt32Ty(CI->getContext())); + Val = ConstantExpr::getSExt(CI, Type::getInt32Ty(CI->getContext())); else - ArgVal = ConstantExpr::getZExt(CI,Type::getInt32Ty(CI->getContext())); + Val = ConstantExpr::getZExt(CI, Type::getInt32Ty(CI->getContext())); } } - unsigned ArgReg; - // Passing bools around ends up doing a trunc to i1 and passing it. // Codegen this as an argument + "and 1". 
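// Why "and $1" is enough: an i1 carries its value in bit 0 only, so instead of
// materializing the truncate, the wider source register is reused and masked
// down to 0/1 right before the call. Equivalent scalar logic, for illustration
// only:
#include <cassert>
#include <cstdint>

static bool passAsBool(uint32_t WideReg) {
  return (WideReg & 1u) != 0; // what the emitted "and $1" leaves in the register
}

int main() {
  assert(passAsBool(0x2) == false); // bit 0 clear -> false, despite nonzero value
  assert(passAsBool(0xFF) == true);
  return 0;
}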
- if (ArgVal->getType()->isIntegerTy(1) && isa(ArgVal) && - cast(ArgVal)->getParent() == I->getParent() && - ArgVal->hasOneUse()) { - ArgVal = cast(ArgVal)->getOperand(0); - ArgReg = getRegForValue(ArgVal); - if (ArgReg == 0) return false; - - MVT ArgVT; - if (!isTypeLegal(ArgVal->getType(), ArgVT)) return false; - - ArgReg = FastEmit_ri(ArgVT, ArgVT, ISD::AND, ArgReg, - ArgVal->hasOneUse(), 1); + MVT VT; + auto *TI = dyn_cast(Val); + unsigned ResultReg; + if (TI && TI->getType()->isIntegerTy(1) && CLI.CS && + (TI->getParent() == CLI.CS->getInstruction()->getParent()) && + TI->hasOneUse()) { + Value *PrevVal = TI->getOperand(0); + ResultReg = getRegForValue(PrevVal); + + if (!ResultReg) + return false; + + if (!isTypeLegal(PrevVal->getType(), VT)) + return false; + + ResultReg = + fastEmit_ri(VT, VT, ISD::AND, ResultReg, hasTrivialKill(PrevVal), 1); } else { - ArgReg = getRegForValue(ArgVal); + if (!isTypeLegal(Val->getType(), VT)) + return false; + ResultReg = getRegForValue(Val); } - if (ArgReg == 0) return false; - - Type *ArgTy = ArgVal->getType(); - MVT ArgVT; - if (!isTypeLegal(ArgTy, ArgVT)) - return false; - if (ArgVT == MVT::x86mmx) + if (!ResultReg) return false; - unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy); - Flags.setOrigAlign(OriginalAlignment); - Args.push_back(ArgReg); - ArgVals.push_back(ArgVal); - ArgVTs.push_back(ArgVT); - ArgFlags.push_back(Flags); + ArgRegs.push_back(ResultReg); + OutVTs.push_back(VT); } // Analyze operands of the call, assigning locations to each operand. SmallVector ArgLocs; - CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, - I->getParent()->getContext()); + CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext()); // Allocate shadow area for Win64 - if (isWin64) + if (IsWin64) CCInfo.AllocateStack(32, 8); - CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86); + CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86); // Get a count of how many bytes are to be pushed on the stack. - unsigned NumBytes = CCInfo.getNextStackOffset(); + unsigned NumBytes = CCInfo.getAlignedCallFrameSize(); // Issue CALLSEQ_START unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown)) - .addImm(NumBytes); + .addImm(NumBytes).addImm(0); - // Process argument: walk the register/memloc assignments, inserting - // copies / loads. - SmallVector RegArgs; + // Walk the register/memloc assignments, inserting copies/loads. + const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo(); for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { - CCValAssign &VA = ArgLocs[i]; - unsigned Arg = Args[VA.getValNo()]; - EVT ArgVT = ArgVTs[VA.getValNo()]; + CCValAssign const &VA = ArgLocs[i]; + const Value *ArgVal = OutVals[VA.getValNo()]; + MVT ArgVT = OutVTs[VA.getValNo()]; + + if (ArgVT == MVT::x86mmx) + return false; + + unsigned ArgReg = ArgRegs[VA.getValNo()]; // Promote the value if needed. 
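// Note on the switch below: when the calling convention assigns a value a
// wider register or stack slot than its own type (for example an i8 argument
// living in a 32-bit GPR), the value is first sign-, zero-, or any-extended to
// the location type (any-extend leaves the upper bits unspecified); BCvt covers
// same-sized reinterpretation of the bits.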
switch (VA.getLocInfo()) { @@ -2868,8 +2960,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { case CCValAssign::SExt: { assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() && "Unexpected extend"); - bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), - Arg, ArgVT, Arg); + bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg, + ArgVT, ArgReg); assert(Emitted && "Failed to emit a sext!"); (void)Emitted; ArgVT = VA.getLocVT(); break; @@ -2877,8 +2969,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { case CCValAssign::ZExt: { assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() && "Unexpected extend"); - bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), - Arg, ArgVT, Arg); + bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg, + ArgVT, ArgReg); assert(Emitted && "Failed to emit a zext!"); (void)Emitted; ArgVT = VA.getLocVT(); break; @@ -2886,66 +2978,75 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { case CCValAssign::AExt: { assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() && "Unexpected extend"); - bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), - Arg, ArgVT, Arg); + bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg, + ArgVT, ArgReg); if (!Emitted) - Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), - Arg, ArgVT, Arg); + Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg, + ArgVT, ArgReg); if (!Emitted) - Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), - Arg, ArgVT, Arg); + Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg, + ArgVT, ArgReg); assert(Emitted && "Failed to emit a aext!"); (void)Emitted; ArgVT = VA.getLocVT(); break; } case CCValAssign::BCvt: { - unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(), - ISD::BITCAST, Arg, /*TODO: Kill=*/false); - assert(BC != 0 && "Failed to emit a bitcast!"); - Arg = BC; + ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg, + /*TODO: Kill=*/false); + assert(ArgReg && "Failed to emit a bitcast!"); ArgVT = VA.getLocVT(); break; } - case CCValAssign::VExt: + case CCValAssign::VExt: // VExt has not been implemented, so this should be impossible to reach // for now. However, fallback to Selection DAG isel once implemented. return false; + case CCValAssign::AExtUpper: + case CCValAssign::SExtUpper: + case CCValAssign::ZExtUpper: + case CCValAssign::FPExt: + llvm_unreachable("Unexpected loc info!"); case CCValAssign::Indirect: // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully // support this. return false; - case CCValAssign::FPExt: - llvm_unreachable("Unexpected loc info!"); } if (VA.isRegLoc()) { BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg); - RegArgs.push_back(VA.getLocReg()); + TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg); + OutRegs.push_back(VA.getLocReg()); } else { + assert(VA.isMemLoc()); + + // Don't emit stores for undef values. 
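// Skipping the store is safe because an undef argument imposes no requirement
// on what the callee observes in that stack slot; whatever already happens to
// be there is as good a value as any, so no instruction needs to be emitted.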
+ if (isa(ArgVal)) + continue; + unsigned LocMemOffset = VA.getLocMemOffset(); X86AddressMode AM; - const X86RegisterInfo *RegInfo = static_cast( - getTargetMachine()->getRegisterInfo()); AM.Base.Reg = RegInfo->getStackRegister(); AM.Disp = LocMemOffset; - const Value *ArgVal = ArgVals[VA.getValNo()]; - ISD::ArgFlagsTy Flags = ArgFlags[VA.getValNo()]; - + ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()]; + unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType()); + MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( + MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset), + MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment); if (Flags.isByVal()) { X86AddressMode SrcAM; - SrcAM.Base.Reg = Arg; - bool Res = TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()); - assert(Res && "memcpy length already checked!"); (void)Res; + SrcAM.Base.Reg = ArgReg; + if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize())) + return false; } else if (isa(ArgVal) || isa(ArgVal)) { // If this is a really simple value, emit this with the Value* version // of X86FastEmitStore. If it isn't simple, we don't want to do this, // as it can cause us to reevaluate the argument. - if (!X86FastEmitStore(ArgVT, ArgVal, AM)) + if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO)) return false; } else { - if (!X86FastEmitStore(ArgVT, Arg, /*ValIsKill=*/false, AM)) + bool ValIsKill = hasTrivialKill(ArgVal); + if (!X86FastEmitStore(ArgVT, ArgReg, ValIsKill, AM, MMO)) return false; } } @@ -2959,37 +3060,53 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base); } - if (Subtarget->is64Bit() && isVarArg && !isWin64) { + if (Is64Bit && IsVarArg && !IsWin64) { + // From AMD64 ABI document: + // For calls that may call functions that use varargs or stdargs + // (prototype-less calls or calls to functions containing ellipsis (...) in + // the declaration) %al is used as hidden argument to specify the number + // of SSE registers used. The contents of %al do not need to match exactly + // the number of registers, but must be an ubound on the number of SSE + // registers used and is in the range 0 - 8 inclusive. + // Count the number of XMM registers allocated. static const MCPhysReg XMMArgRegs[] = { X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 }; - unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8); + unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs); + assert((Subtarget->hasSSE1() || !NumXMMRegs) + && "SSE registers cannot be used when SSE is disabled"); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri), X86::AL).addImm(NumXMMRegs); } + // Materialize callee address in a register. FIXME: GV address can be + // handled with a CALLpcrel32 instead. + X86AddressMode CalleeAM; + if (!X86SelectCallAddress(Callee, CalleeAM)) + return false; + + unsigned CalleeOp = 0; + const GlobalValue *GV = nullptr; + if (CalleeAM.GV != nullptr) { + GV = CalleeAM.GV; + } else if (CalleeAM.Base.Reg != 0) { + CalleeOp = CalleeAM.Base.Reg; + } else + return false; + // Issue the call. MachineInstrBuilder MIB; if (CalleeOp) { // Register-indirect call. - unsigned CallOpc; - if (Subtarget->is64Bit()) - CallOpc = X86::CALL64r; - else - CallOpc = X86::CALL32r; + unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r; MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc)) .addReg(CalleeOp); - } else { // Direct call. 
assert(GV && "Not a direct call"); - unsigned CallOpc; - if (Subtarget->is64Bit()) - CallOpc = X86::CALL64pcrel32; - else - CallOpc = X86::CALLpcrel32; + unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32; // See if we need any target-specific flags on the GV operand. unsigned char OpFlags = 0; @@ -3003,7 +3120,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) { OpFlags = X86II::MO_PLT; } else if (Subtarget->isPICStyleStubAny() && - (GV->isDeclaration() || GV->isWeakForLinker()) && + !GV->isStrongDefinitionForLinker() && (!Subtarget->getTargetTriple().isMacOSX() || Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) { // PC-relative references to external symbols should go through $stub, @@ -3012,92 +3129,72 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { OpFlags = X86II::MO_DARWIN_STUB; } - MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc)); - if (MemIntName) - MIB.addExternalSymbol(MemIntName, OpFlags); + if (Symbol) + MIB.addSym(Symbol, OpFlags); else MIB.addGlobalAddress(GV, 0, OpFlags); } - // Add a register mask with the call-preserved registers. + // Add a register mask operand representing the call-preserved registers. // Proper defs for return values will be added by setPhysRegsDeadExcept(). - MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv())); + MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC)); // Add an implicit use GOT pointer in EBX. if (Subtarget->isPICStyleGOT()) MIB.addReg(X86::EBX, RegState::Implicit); - if (Subtarget->is64Bit() && isVarArg && !isWin64) + if (Is64Bit && IsVarArg && !IsWin64) MIB.addReg(X86::AL, RegState::Implicit); // Add implicit physical register uses to the call. - for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) - MIB.addReg(RegArgs[i], RegState::Implicit); + for (auto Reg : OutRegs) + MIB.addReg(Reg, RegState::Implicit); // Issue CALLSEQ_END + unsigned NumBytesForCalleeToPop = + computeBytesPoppedByCallee(Subtarget, CC, CLI.CS); unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); - const unsigned NumBytesCallee = computeBytesPoppedByCallee(*Subtarget, CS); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp)) - .addImm(NumBytes).addImm(NumBytesCallee); - - // Build info for return calling conv lowering code. - // FIXME: This is practically a copy-paste from TargetLowering::LowerCallTo. - SmallVector Ins; - SmallVector RetTys; - ComputeValueVTs(TLI, I->getType(), RetTys); - for (unsigned i = 0, e = RetTys.size(); i != e; ++i) { - EVT VT = RetTys[i]; - MVT RegisterVT = TLI.getRegisterType(I->getParent()->getContext(), VT); - unsigned NumRegs = TLI.getNumRegisters(I->getParent()->getContext(), VT); - for (unsigned j = 0; j != NumRegs; ++j) { - ISD::InputArg MyFlags; - MyFlags.VT = RegisterVT; - MyFlags.Used = !CS.getInstruction()->use_empty(); - if (CS.paramHasAttr(0, Attribute::SExt)) - MyFlags.Flags.setSExt(); - if (CS.paramHasAttr(0, Attribute::ZExt)) - MyFlags.Flags.setZExt(); - if (CS.paramHasAttr(0, Attribute::InReg)) - MyFlags.Flags.setInReg(); - Ins.push_back(MyFlags); - } - } + .addImm(NumBytes).addImm(NumBytesForCalleeToPop); // Now handle call return values. 
- SmallVector UsedRegs; SmallVector RVLocs; - CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs, - I->getParent()->getContext()); - unsigned ResultReg = FuncInfo.CreateRegs(I->getType()); + CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs, + CLI.RetTy->getContext()); CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86); + + // Copy all of the result registers out of their specified physreg. + unsigned ResultReg = FuncInfo.CreateRegs(CLI.RetTy); for (unsigned i = 0; i != RVLocs.size(); ++i) { - EVT CopyVT = RVLocs[i].getValVT(); + CCValAssign &VA = RVLocs[i]; + EVT CopyVT = VA.getValVT(); unsigned CopyReg = ResultReg + i; - // If this is a call to a function that returns an fp value on the x87 fp - // stack, but where we prefer to use the value in xmm registers, copy it - // out as F80 and use a truncate to move it from fp stack reg to xmm reg. - if ((RVLocs[i].getLocReg() == X86::ST0 || - RVLocs[i].getLocReg() == X86::ST1)) { - if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) { - CopyVT = MVT::f80; - CopyReg = createResultReg(&X86::RFP80RegClass); - } - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(X86::FpPOP_RETVAL), CopyReg); - } else { - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::COPY), - CopyReg).addReg(RVLocs[i].getLocReg()); - UsedRegs.push_back(RVLocs[i].getLocReg()); + // If this is x86-64, and we disabled SSE, we can't return FP values + if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) && + ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) { + report_fatal_error("SSE register return with SSE disabled"); } - if (CopyVT != RVLocs[i].getValVT()) { - // Round the F80 the right size, which also moves to the appropriate xmm - // register. This is accomplished by storing the F80 value in memory and - // then loading it back. Ewww... - EVT ResVT = RVLocs[i].getValVT(); + // If we prefer to use the value in xmm registers, copy it out as f80 and + // use a truncate to move it from fp stack reg to xmm reg. + if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) && + isScalarFPTypeInSSEReg(VA.getValVT())) { + CopyVT = MVT::f80; + CopyReg = createResultReg(&X86::RFP80RegClass); + } + + // Copy out the result. + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), CopyReg).addReg(VA.getLocReg()); + InRegs.push_back(VA.getLocReg()); + + // Round the f80 to the right size, which also moves it to the appropriate + // xmm register. This is accomplished by storing the f80 value in memory + // and then loading it back. + if (CopyVT != VA.getValVT()) { + EVT ResVT = VA.getValVT(); unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64; unsigned MemSize = ResVT.getSizeInBits()/8; int FI = MFI.CreateStackObject(MemSize, MemSize, false); @@ -3110,18 +3207,15 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { } } - if (RVLocs.size()) - UpdateValueMap(I, ResultReg, RVLocs.size()); - - // Set all unused physreg defs as dead. 
- static_cast(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI); + CLI.ResultReg = ResultReg; + CLI.NumResultRegs = RVLocs.size(); + CLI.Call = MIB; return true; } - bool -X86FastISel::TargetSelectInstruction(const Instruction *I) { +X86FastISel::fastSelectInstruction(const Instruction *I) { switch (I->getOpcode()) { default: break; case Instruction::Load: @@ -3137,8 +3231,6 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) { return X86SelectZExt(I); case Instruction::Br: return X86SelectBranch(I); - case Instruction::Call: - return X86SelectCall(I); case Instruction::LShr: case Instruction::AShr: case Instruction::Shl: @@ -3156,17 +3248,43 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) { return X86SelectFPExt(I); case Instruction::FPTrunc: return X86SelectFPTrunc(I); + case Instruction::SIToFP: + return X86SelectSIToFP(I); case Instruction::IntToPtr: // Deliberate fall-through. case Instruction::PtrToInt: { - EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); - EVT DstVT = TLI.getValueType(I->getType()); + EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType()); + EVT DstVT = TLI.getValueType(DL, I->getType()); if (DstVT.bitsGT(SrcVT)) return X86SelectZExt(I); if (DstVT.bitsLT(SrcVT)) return X86SelectTrunc(I); unsigned Reg = getRegForValue(I->getOperand(0)); if (Reg == 0) return false; - UpdateValueMap(I, Reg); + updateValueMap(I, Reg); + return true; + } + case Instruction::BitCast: { + // Select SSE2/AVX bitcasts between 128/256 bit vector types. + if (!Subtarget->hasSSE2()) + return false; + + EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType()); + EVT DstVT = TLI.getValueType(DL, I->getType()); + + if (!SrcVT.isSimple() || !DstVT.isSimple()) + return false; + + if (!SrcVT.is128BitVector() && + !(Subtarget->hasAVX() && SrcVT.is256BitVector())) + return false; + + unsigned Reg = getRegForValue(I->getOperand(0)); + if (Reg == 0) + return false; + + // No instruction is needed for conversion. Reuse the register used by + // the fist operand. 
+ updateValueMap(I, Reg); return true; } } @@ -3174,13 +3292,69 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) { return false; } -unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { - MVT VT; - if (!isTypeLegal(C->getType(), VT)) +unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) { + if (VT > MVT::i64) return 0; + uint64_t Imm = CI->getZExtValue(); + if (Imm == 0) { + unsigned SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass); + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type"); + case MVT::i1: + case MVT::i8: + return fastEmitInst_extractsubreg(MVT::i8, SrcReg, /*Kill=*/true, + X86::sub_8bit); + case MVT::i16: + return fastEmitInst_extractsubreg(MVT::i16, SrcReg, /*Kill=*/true, + X86::sub_16bit); + case MVT::i32: + return SrcReg; + case MVT::i64: { + unsigned ResultReg = createResultReg(&X86::GR64RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg) + .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit); + return ResultReg; + } + } + } + + unsigned Opc = 0; + switch (VT.SimpleTy) { + default: llvm_unreachable("Unexpected value type"); + case MVT::i1: VT = MVT::i8; // fall-through + case MVT::i8: Opc = X86::MOV8ri; break; + case MVT::i16: Opc = X86::MOV16ri; break; + case MVT::i32: Opc = X86::MOV32ri; break; + case MVT::i64: { + if (isUInt<32>(Imm)) + Opc = X86::MOV32ri; + else if (isInt<32>(Imm)) + Opc = X86::MOV64ri32; + else + Opc = X86::MOV64ri; + break; + } + } + if (VT == MVT::i64 && Opc == X86::MOV32ri) { + unsigned SrcReg = fastEmitInst_i(Opc, &X86::GR32RegClass, Imm); + unsigned ResultReg = createResultReg(&X86::GR64RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg) + .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit); + return ResultReg; + } + return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm); +} + +unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) { + if (CFP->isNullValue()) + return fastMaterializeFloatZero(CFP); + // Can't handle alternate code models yet. - if (TM.getCodeModel() != CodeModel::Small) + CodeModel::Model CM = TM.getCodeModel(); + if (CM != CodeModel::Small && CM != CodeModel::Large) return 0; // Get opcode and regclass of the output for the given load instruction. @@ -3188,23 +3362,6 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { const TargetRegisterClass *RC = nullptr; switch (VT.SimpleTy) { default: return 0; - case MVT::i8: - Opc = X86::MOV8rm; - RC = &X86::GR8RegClass; - break; - case MVT::i16: - Opc = X86::MOV16rm; - RC = &X86::GR16RegClass; - break; - case MVT::i32: - Opc = X86::MOV32rm; - RC = &X86::GR32RegClass; - break; - case MVT::i64: - // Must be in x86-64 mode. - Opc = X86::MOV64rm; - RC = &X86::GR64RegClass; - break; case MVT::f32: if (X86ScalarSSEf32) { Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm; @@ -3228,39 +3385,11 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { return 0; } - // Materialize addresses with LEA/MOV instructions. - if (isa(C)) { - X86AddressMode AM; - if (X86SelectAddress(C, AM)) { - // If the expression is just a basereg, then we're done, otherwise we need - // to emit an LEA. 
- if (AM.BaseType == X86AddressMode::RegBase && - AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr) - return AM.Base.Reg; - - unsigned ResultReg = createResultReg(RC); - if (TM.getRelocationModel() == Reloc::Static && - TLI.getPointerTy() == MVT::i64) { - // The displacement code be more than 32 bits away so we need to use - // an instruction with a 64 bit immediate - Opc = X86::MOV64ri; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(Opc), ResultReg).addGlobalAddress(cast(C)); - } else { - Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r; - addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(Opc), ResultReg), AM); - } - return ResultReg; - } - return 0; - } - // MachineConstantPool wants an explicit alignment. - unsigned Align = DL.getPrefTypeAlignment(C->getType()); + unsigned Align = DL.getPrefTypeAlignment(CFP->getType()); if (Align == 0) { - // Alignment of vector types. FIXME! - Align = DL.getTypeAllocSize(C->getType()); + // Alignment of vector types. FIXME! + Align = DL.getTypeAllocSize(CFP->getType()); } // x86-32 PIC requires a PIC base register for constant pools. @@ -3278,23 +3407,91 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { } // Create the load from the constant pool. - unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align); + unsigned CPI = MCP.getConstantPoolIndex(CFP, Align); unsigned ResultReg = createResultReg(RC); + + if (CM == CodeModel::Large) { + unsigned AddrReg = createResultReg(&X86::GR64RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri), + AddrReg) + .addConstantPoolIndex(CPI, 0, OpFlag); + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(Opc), ResultReg); + addDirectMem(MIB, AddrReg); + MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand( + MachinePointerInfo::getConstantPool(*FuncInfo.MF), + MachineMemOperand::MOLoad, DL.getPointerSize(), Align); + MIB->addMemOperand(*FuncInfo.MF, MMO); + return ResultReg; + } + addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg), - MCPOffset, PICBase, OpFlag); - + CPI, PICBase, OpFlag); return ResultReg; } -unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) { +unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) { + // Can't handle alternate code models yet. + if (TM.getCodeModel() != CodeModel::Small) + return 0; + + // Materialize addresses with LEA/MOV instructions. + X86AddressMode AM; + if (X86SelectAddress(GV, AM)) { + // If the expression is just a basereg, then we're done, otherwise we need + // to emit an LEA. + if (AM.BaseType == X86AddressMode::RegBase && + AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr) + return AM.Base.Reg; + + unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); + if (TM.getRelocationModel() == Reloc::Static && + TLI.getPointerTy(DL) == MVT::i64) { + // The displacement code could be more than 32 bits away so we need to use + // an instruction with a 64 bit immediate + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV64ri), + ResultReg) + .addGlobalAddress(GV); + } else { + unsigned Opc = + TLI.getPointerTy(DL) == MVT::i32 + ? (Subtarget->isTarget64BitILP32() ? 
                                               X86::LEA64_32r : X86::LEA32r)
+                  : X86::LEA64r;
+      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+                             TII.get(Opc), ResultReg), AM);
+    }
+    return ResultReg;
+  }
+  return 0;
+}
+
+unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
+  EVT CEVT = TLI.getValueType(DL, C->getType(), true);
+
+  // Only handle simple types.
+  if (!CEVT.isSimple())
+    return 0;
+  MVT VT = CEVT.getSimpleVT();
+
+  if (const auto *CI = dyn_cast<ConstantInt>(C))
+    return X86MaterializeInt(CI, VT);
+  else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
+    return X86MaterializeFP(CFP, VT);
+  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
+    return X86MaterializeGV(GV, VT);
+
+  return 0;
+}
+
+unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
   // Fail on dynamic allocas. At this point, getRegForValue has already
   // checked its CSE maps, so if we're here trying to handle a dynamic
   // alloca, we're not going to succeed. X86SelectAddress has a
   // check for dynamic allocas, because it's called directly from
-  // various places, but TargetMaterializeAlloca also needs a check
+  // various places, but targetMaterializeAlloca also needs a check
   // in order to avoid recursion between getRegForValue,
-  // X86SelectAddrss, and TargetMaterializeAlloca.
+  // X86SelectAddrss, and targetMaterializeAlloca.
   if (!FuncInfo.StaticAllocaMap.count(C))
     return 0;
   assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
@@ -3302,15 +3499,18 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
   X86AddressMode AM;
   if (!X86SelectAddress(C, AM))
     return 0;
-  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
-  const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
+  unsigned Opc =
+      TLI.getPointerTy(DL) == MVT::i32
+          ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
+          : X86::LEA64r;
+  const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
   unsigned ResultReg = createResultReg(RC);
   addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                          TII.get(Opc), ResultReg), AM);
   return ResultReg;
 }
 
-unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
+unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
   MVT VT;
   if (!isTypeLegal(CF->getType(), VT))
     return 0;
@@ -3356,7 +3556,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
   if (!X86SelectAddress(Ptr, AM))
     return false;
 
-  const X86InstrInfo &XII = (const X86InstrInfo&)TII;
+  const X86InstrInfo &XII = (const X86InstrInfo &)TII;
 
   unsigned Size = DL.getTypeAllocSize(LI->getType());
   unsigned Alignment = LI->getAlignment();
@@ -3367,13 +3567,32 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
 
   SmallVector<MachineOperand, 8> AddrOps;
   AM.getFullAddress(AddrOps);
 
-  MachineInstr *Result =
-    XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment);
+  MachineInstr *Result = XII.foldMemoryOperandImpl(
+      *FuncInfo.MF, MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
+      /*AllowCommute=*/true);
   if (!Result)
     return false;
 
+  // The index register could be in the wrong register class. Unfortunately,
+  // foldMemoryOperandImpl could have commuted the instruction so its not enough
+  // to just look at OpNo + the offset to the index reg. We actually need to
+  // scan the instruction to find the index reg and see if its the correct reg
+  // class.
+  unsigned OperandNo = 0;
+  for (MachineInstr::mop_iterator I = Result->operands_begin(),
+       E = Result->operands_end(); I != E; ++I, ++OperandNo) {
+    MachineOperand &MO = *I;
+    if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
+      continue;
+    // Found the index reg, now try to rewrite it.
+    unsigned IndexReg = constrainOperandRegClass(Result->getDesc(),
+                                                 MO.getReg(), OperandNo);
+    if (IndexReg == MO.getReg())
+      continue;
+    MO.setReg(IndexReg);
+  }
+
   Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
-  FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
   MI->eraseFromParent();
   return true;
 }
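// What the folding above buys, in instruction terms (illustrative x86 AT&T
// syntax, not emitted verbatim by this function):
//
//   movl (%rdi,%rcx,4), %eax     ; the LoadInst selected on its own
//   addl %eax, %ebx              ; MI consuming the loaded vreg
//
// becomes the single memory-operand form
//
//   addl (%rdi,%rcx,4), %ebx
//
// The operand scan above exists because foldMemoryOperandImpl may have
// commuted MI, so the index register has to be located by value rather than by
// its original operand position before its register class is re-constrained.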