X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FSelectionDAG%2FFastISel.cpp;h=dce3389b014098d7ac689680c49b7b3e6d6c233b;hb=50822e618ca30b8a004df66de9f84166427a1b57;hp=132c42ef0d71eb951f454831b4651a66d2ce107b;hpb=f81eca0ab908fdcf98ae0efaa75acccc8ba40dc2;p=oota-llvm.git diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index 132c42ef0d7..ab3dbfa40bc 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -39,23 +39,130 @@ // //===----------------------------------------------------------------------===// -#include "llvm/Function.h" -#include "llvm/GlobalVariable.h" -#include "llvm/Instructions.h" -#include "llvm/IntrinsicInst.h" +#include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/FastISel.h" +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Analysis/BranchProbabilityInfo.h" +#include "llvm/Analysis/Loads.h" +#include "llvm/CodeGen/Analysis.h" +#include "llvm/CodeGen/FunctionLoweringInfo.h" +#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Analysis/DebugInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/CodeGen/StackMaps.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DebugInfo.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/Operator.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Support/ErrorHandling.h" -#include "FunctionLoweringInfo.h" +#include "llvm/Target/TargetSubtargetInfo.h" using namespace llvm; +#define DEBUG_TYPE "isel" + +STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by " + "target-independent selector"); +STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by " + "target-specific selector"); +STATISTIC(NumFastIselDead, "Number of dead insts removed on failure"); + +void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS, + unsigned AttrIdx) { + IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt); + IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt); + IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg); + IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet); + IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest); + IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal); + IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca); + IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned); + Alignment = CS->getParamAlignment(AttrIdx); +} + +/// Set the current block to which generated machine instructions will be +/// appended, and clear the local CSE map. +void FastISel::startNewBlock() { + LocalValueMap.clear(); + + // Instructions are appended to FuncInfo.MBB. If the basic block already + // contains labels or copies, use the last instruction as the last local + // value. + EmitStartPt = nullptr; + if (!FuncInfo.MBB->empty()) + EmitStartPt = &FuncInfo.MBB->back(); + LastLocalValue = EmitStartPt; +} + +bool FastISel::lowerArguments() { + if (!FuncInfo.CanLowerReturn) + // Fallback to SDISel argument lowering code to deal with sret pointer + // parameter. 
+ return false; + + if (!fastLowerArguments()) + return false; + + // Enter arguments into ValueMap for uses in non-entry BBs. + for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(), + E = FuncInfo.Fn->arg_end(); + I != E; ++I) { + DenseMap::iterator VI = LocalValueMap.find(I); + assert(VI != LocalValueMap.end() && "Missed an argument?"); + FuncInfo.ValueMap[I] = VI->second; + } + return true; +} + +void FastISel::flushLocalValueMap() { + LocalValueMap.clear(); + LastLocalValue = EmitStartPt; + recomputeInsertPt(); + SavedInsertPt = FuncInfo.InsertPt; +} + +bool FastISel::hasTrivialKill(const Value *V) { + // Don't consider constants or arguments to have trivial kills. + const Instruction *I = dyn_cast(V); + if (!I) + return false; + + // No-op casts are trivially coalesced by fast-isel. + if (const auto *Cast = dyn_cast(I)) + if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) && + !hasTrivialKill(Cast->getOperand(0))) + return false; + + // Even the value might have only one use in the LLVM IR, it is possible that + // FastISel might fold the use into another instruction and now there is more + // than one use at the Machine Instruction level. + unsigned Reg = lookUpRegForValue(V); + if (Reg && !MRI.use_empty(Reg)) + return false; + + // GEPs with all zero indices are trivially coalesced by fast-isel. + if (const auto *GEP = dyn_cast(I)) + if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0))) + return false; + + // Only instructions with a single use in the same basic block are considered + // to have trivial kills. + return I->hasOneUse() && + !(I->getOpcode() == Instruction::BitCast || + I->getOpcode() == Instruction::PtrToInt || + I->getOpcode() == Instruction::IntToPtr) && + cast(*I->user_begin())->getParent() == I->getParent(); +} + unsigned FastISel::getRegForValue(const Value *V) { EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true); // Don't handle non-simple values in FastISel. @@ -67,36 +174,54 @@ unsigned FastISel::getRegForValue(const Value *V) { // of whether FastISel can handle them. MVT VT = RealVT.getSimpleVT(); if (!TLI.isTypeLegal(VT)) { - // Promote MVT::i1 to a legal type though, because it's common and easy. - if (VT == MVT::i1) + // Handle integer promotions, though, because they're common and easy. + if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16) VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT(); else return 0; } - // Look up the value to see if we already have a register for it. We - // cache values defined by Instructions across blocks, and other values - // only locally. This is because Instructions already have the SSA - // def-dominates-use requirement enforced. - if (ValueMap.count(V)) - return ValueMap[V]; - unsigned Reg = LocalValueMap[V]; - if (Reg != 0) + // Look up the value to see if we already have a register for it. + unsigned Reg = lookUpRegForValue(V); + if (Reg) return Reg; - if (const ConstantInt *CI = dyn_cast(V)) { + // In bottom-up mode, just create the virtual register which will be used + // to hold the value. It will be materialized later. + if (isa(V) && + (!isa(V) || + !FuncInfo.StaticAllocaMap.count(cast(V)))) + return FuncInfo.InitializeRegForValue(V); + + SavePoint SaveInsertPt = enterLocalValueArea(); + + // Materialize the value in a register. Emit any instructions in the + // local value area. 
+ Reg = materializeRegForValue(V, VT); + + leaveLocalValueArea(SaveInsertPt); + + return Reg; +} + +unsigned FastISel::materializeConstant(const Value *V, MVT VT) { + unsigned Reg = 0; + if (const auto *CI = dyn_cast(V)) { if (CI->getValue().getActiveBits() <= 64) - Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue()); - } else if (isa(V)) { - Reg = TargetMaterializeAlloca(cast(V)); - } else if (isa(V)) { + Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue()); + } else if (isa(V)) + Reg = fastMaterializeAlloca(cast(V)); + else if (isa(V)) // Translate this as an integer zero so that it can be // local-CSE'd with actual integer zeros. - Reg = - getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext()))); - } else if (const ConstantFP *CF = dyn_cast(V)) { - // Try to emit the constant directly. - Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF); + Reg = getRegForValue( + Constant::getNullValue(DL.getIntPtrType(V->getContext()))); + else if (const auto *CF = dyn_cast(V)) { + if (CF->isNullValue()) + Reg = fastMaterializeFloatZero(CF); + else + // Try to emit the constant directly. + Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF); if (!Reg) { // Try to emit the constant by using an integer constant with a cast. @@ -106,34 +231,52 @@ unsigned FastISel::getRegForValue(const Value *V) { uint64_t x[2]; uint32_t IntBitWidth = IntVT.getSizeInBits(); bool isExact; - (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true, - APFloat::rmTowardZero, &isExact); + (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true, + APFloat::rmTowardZero, &isExact); if (isExact) { - APInt IntVal(IntBitWidth, 2, x); + APInt IntVal(IntBitWidth, x); unsigned IntegerReg = - getRegForValue(ConstantInt::get(V->getContext(), IntVal)); + getRegForValue(ConstantInt::get(V->getContext(), IntVal)); if (IntegerReg != 0) - Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg); + Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg, + /*Kill=*/false); } } - } else if (const Operator *Op = dyn_cast(V)) { - if (!SelectOperator(Op, Op->getOpcode())) return 0; - Reg = LocalValueMap[Op]; + } else if (const auto *Op = dyn_cast(V)) { + if (!selectOperator(Op, Op->getOpcode())) + if (!isa(Op) || + !fastSelectInstruction(cast(Op))) + return 0; + Reg = lookUpRegForValue(Op); } else if (isa(V)) { Reg = createResultReg(TLI.getRegClassFor(VT)); - BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::IMPLICIT_DEF), Reg); } - - // If target-independent code couldn't handle the value, give target-specific - // code a try. - if (!Reg && isa(V)) - Reg = TargetMaterializeConstant(cast(V)); - + return Reg; +} + +/// Helper for getRegForValue. This function is called when the value isn't +/// already available in a register and must be materialized with new +/// instructions. +unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) { + unsigned Reg = 0; + // Give the target-specific code a try first. + if (isa(V)) + Reg = fastMaterializeConstant(cast(V)); + + // If target-specific code couldn't or didn't want to handle the value, then + // give target-independent code a try. + if (!Reg) + Reg = materializeConstant(V, VT); + // Don't cache constant materializations in the general ValueMap. // To do so would require tracking what uses they dominate. 
- if (Reg != 0) + if (Reg) { LocalValueMap[V] = Reg; + LastLocalValue = MRI.getVRegDef(Reg); + } return Reg; } @@ -141,55 +284,100 @@ unsigned FastISel::lookUpRegForValue(const Value *V) { // Look up the value to see if we already have a register for it. We // cache values defined by Instructions across blocks, and other values // only locally. This is because Instructions already have the SSA - // def-dominatess-use requirement enforced. - if (ValueMap.count(V)) - return ValueMap[V]; + // def-dominates-use requirement enforced. + DenseMap::iterator I = FuncInfo.ValueMap.find(V); + if (I != FuncInfo.ValueMap.end()) + return I->second; return LocalValueMap[V]; } -/// UpdateValueMap - Update the value map to include the new mapping for this -/// instruction, or insert an extra copy to get the result in a previous -/// determined register. -/// NOTE: This is only necessary because we might select a block that uses -/// a value before we select the block that defines the value. It might be -/// possible to fix this by selecting blocks in reverse postorder. -unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) { +void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) { if (!isa(I)) { LocalValueMap[I] = Reg; - return Reg; + return; } - - unsigned &AssignedReg = ValueMap[I]; + + unsigned &AssignedReg = FuncInfo.ValueMap[I]; if (AssignedReg == 0) + // Use the new register. AssignedReg = Reg; else if (Reg != AssignedReg) { - const TargetRegisterClass *RegClass = MRI.getRegClass(Reg); - TII.copyRegToReg(*MBB, MBB->end(), AssignedReg, - Reg, RegClass, RegClass); + // Arrange for uses of AssignedReg to be replaced by uses of Reg. + for (unsigned i = 0; i < NumRegs; i++) + FuncInfo.RegFixups[AssignedReg + i] = Reg + i; + + AssignedReg = Reg; } - return AssignedReg; } -unsigned FastISel::getRegForGEPIndex(const Value *Idx) { +std::pair FastISel::getRegForGEPIndex(const Value *Idx) { unsigned IdxN = getRegForValue(Idx); if (IdxN == 0) // Unhandled operand. Halt "fast" selection and bail. - return 0; + return std::pair(0, false); + + bool IdxNIsKill = hasTrivialKill(Idx); // If the index is smaller or larger than intptr_t, truncate or extend it. MVT PtrVT = TLI.getPointerTy(); EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false); - if (IdxVT.bitsLT(PtrVT)) - IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN); - else if (IdxVT.bitsGT(PtrVT)) - IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN); - return IdxN; + if (IdxVT.bitsLT(PtrVT)) { + IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN, + IdxNIsKill); + IdxNIsKill = true; + } else if (IdxVT.bitsGT(PtrVT)) { + IdxN = + fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill); + IdxNIsKill = true; + } + return std::pair(IdxN, IdxNIsKill); } -/// SelectBinaryOp - Select and emit code for a binary operator instruction, -/// which has an opcode which directly corresponds to the given ISD opcode. -/// -bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) { +void FastISel::recomputeInsertPt() { + if (getLastLocalValue()) { + FuncInfo.InsertPt = getLastLocalValue(); + FuncInfo.MBB = FuncInfo.InsertPt->getParent(); + ++FuncInfo.InsertPt; + } else + FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI(); + + // Now skip past any EH_LABELs, which must remain at the beginning. 
+ while (FuncInfo.InsertPt != FuncInfo.MBB->end() && + FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL) + ++FuncInfo.InsertPt; +} + +void FastISel::removeDeadCode(MachineBasicBlock::iterator I, + MachineBasicBlock::iterator E) { + assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!"); + while (I != E) { + MachineInstr *Dead = &*I; + ++I; + Dead->eraseFromParent(); + ++NumFastIselDead; + } + recomputeInsertPt(); +} + +FastISel::SavePoint FastISel::enterLocalValueArea() { + MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt; + DebugLoc OldDL = DbgLoc; + recomputeInsertPt(); + DbgLoc = DebugLoc(); + SavePoint SP = {OldInsertPt, OldDL}; + return SP; +} + +void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) { + if (FuncInfo.InsertPt != FuncInfo.MBB->begin()) + LastLocalValue = std::prev(FuncInfo.InsertPt); + + // Restore the previous insert position. + FuncInfo.InsertPt = OldInsertPt.InsertPt; + DbgLoc = OldInsertPt.DL; +} + +bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) { EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true); if (VT == MVT::Other || !VT.isSimple()) // Unhandled type. Halt "fast" selection and bail. @@ -202,485 +390,1124 @@ bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) { if (!TLI.isTypeLegal(VT)) { // MVT::i1 is special. Allow AND, OR, or XOR because they // don't require additional zeroing, which makes them easy. - if (VT == MVT::i1 && - (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR || - ISDOpcode == ISD::XOR)) + if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR || + ISDOpcode == ISD::XOR)) VT = TLI.getTypeToTransformTo(I->getContext(), VT); else return false; } + // Check if the first operand is a constant, and handle it as "ri". At -O0, + // we don't have anything that canonicalizes operand order. + if (const auto *CI = dyn_cast(I->getOperand(0))) + if (isa(I) && cast(I)->isCommutative()) { + unsigned Op1 = getRegForValue(I->getOperand(1)); + if (!Op1) + return false; + bool Op1IsKill = hasTrivialKill(I->getOperand(1)); + + unsigned ResultReg = + fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill, + CI->getZExtValue(), VT.getSimpleVT()); + if (!ResultReg) + return false; + + // We successfully emitted code for the given LLVM Instruction. + updateValueMap(I, ResultReg); + return true; + } + unsigned Op0 = getRegForValue(I->getOperand(0)); - if (Op0 == 0) - // Unhandled operand. Halt "fast" selection and bail. + if (!Op0) // Unhandled operand. Halt "fast" selection and bail. return false; + bool Op0IsKill = hasTrivialKill(I->getOperand(0)); // Check if the second operand is a constant and handle it appropriately. - if (ConstantInt *CI = dyn_cast(I->getOperand(1))) { - unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(), - ISDOpcode, Op0, CI->getZExtValue()); - if (ResultReg != 0) { - // We successfully emitted code for the given LLVM Instruction. - UpdateValueMap(I, ResultReg); - return true; + if (const auto *CI = dyn_cast(I->getOperand(1))) { + uint64_t Imm = CI->getZExtValue(); + + // Transform "sdiv exact X, 8" -> "sra X, 3". + if (ISDOpcode == ISD::SDIV && isa(I) && + cast(I)->isExact() && isPowerOf2_64(Imm)) { + Imm = Log2_64(Imm); + ISDOpcode = ISD::SRA; + } + + // Transform "urem x, pow2" -> "and x, pow2-1". 
+ if (ISDOpcode == ISD::UREM && isa(I) && + isPowerOf2_64(Imm)) { + --Imm; + ISDOpcode = ISD::AND; } + + unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, + Op0IsKill, Imm, VT.getSimpleVT()); + if (!ResultReg) + return false; + + // We successfully emitted code for the given LLVM Instruction. + updateValueMap(I, ResultReg); + return true; } // Check if the second operand is a constant float. - if (ConstantFP *CF = dyn_cast(I->getOperand(1))) { - unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(), - ISDOpcode, Op0, CF); - if (ResultReg != 0) { + if (const auto *CF = dyn_cast(I->getOperand(1))) { + unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(), + ISDOpcode, Op0, Op0IsKill, CF); + if (ResultReg) { // We successfully emitted code for the given LLVM Instruction. - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } } unsigned Op1 = getRegForValue(I->getOperand(1)); - if (Op1 == 0) - // Unhandled operand. Halt "fast" selection and bail. + if (!Op1) // Unhandled operand. Halt "fast" selection and bail. return false; + bool Op1IsKill = hasTrivialKill(I->getOperand(1)); // Now we have both operands in registers. Emit the instruction. - unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(), - ISDOpcode, Op0, Op1); - if (ResultReg == 0) + unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(), + ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill); + if (!ResultReg) // Target-specific code wasn't able to find a machine opcode for // the given ISD opcode and type. Halt "fast" selection and bail. return false; // We successfully emitted code for the given LLVM Instruction. - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } -bool FastISel::SelectGetElementPtr(const User *I) { +bool FastISel::selectGetElementPtr(const User *I) { unsigned N = getRegForValue(I->getOperand(0)); - if (N == 0) - // Unhandled operand. Halt "fast" selection and bail. + if (!N) // Unhandled operand. Halt "fast" selection and bail. return false; - - const Type *Ty = I->getOperand(0)->getType(); + bool NIsKill = hasTrivialKill(I->getOperand(0)); + + // Keep a running tab of the total offset to coalesce multiple N = N + Offset + // into a single N = N + TotalOffset. + uint64_t TotalOffs = 0; + // FIXME: What's a good SWAG number for MaxOffs? + uint64_t MaxOffs = 2048; + Type *Ty = I->getOperand(0)->getType(); MVT VT = TLI.getPointerTy(); - for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1, - E = I->op_end(); OI != E; ++OI) { + for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1, + E = I->op_end(); + OI != E; ++OI) { const Value *Idx = *OI; - if (const StructType *StTy = dyn_cast(Ty)) { + if (auto *StTy = dyn_cast(Ty)) { unsigned Field = cast(Idx)->getZExtValue(); if (Field) { // N = N + Offset - uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field); - // FIXME: This can be optimized by combining the add with a - // subsequent one. - N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT); - if (N == 0) - // Unhandled operand. Halt "fast" selection and bail. - return false; + TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field); + if (TotalOffs >= MaxOffs) { + N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); + if (!N) // Unhandled operand. Halt "fast" selection and bail. + return false; + NIsKill = true; + TotalOffs = 0; + } } Ty = StTy->getElementType(Field); } else { Ty = cast(Ty)->getElementType(); // If this is a constant subscript, handle it quickly. 
- if (const ConstantInt *CI = dyn_cast(Idx)) { - if (CI->getZExtValue() == 0) continue; - uint64_t Offs = - TD.getTypeAllocSize(Ty)*cast(CI)->getSExtValue(); - N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT); - if (N == 0) - // Unhandled operand. Halt "fast" selection and bail. - return false; + if (const auto *CI = dyn_cast(Idx)) { + if (CI->isZero()) + continue; + // N = N + Offset + TotalOffs += + DL.getTypeAllocSize(Ty) * cast(CI)->getSExtValue(); + if (TotalOffs >= MaxOffs) { + N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); + if (!N) // Unhandled operand. Halt "fast" selection and bail. + return false; + NIsKill = true; + TotalOffs = 0; + } continue; } - + if (TotalOffs) { + N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); + if (!N) // Unhandled operand. Halt "fast" selection and bail. + return false; + NIsKill = true; + TotalOffs = 0; + } + // N = N + Idx * ElementSize; - uint64_t ElementSize = TD.getTypeAllocSize(Ty); - unsigned IdxN = getRegForGEPIndex(Idx); - if (IdxN == 0) - // Unhandled operand. Halt "fast" selection and bail. + uint64_t ElementSize = DL.getTypeAllocSize(Ty); + std::pair Pair = getRegForGEPIndex(Idx); + unsigned IdxN = Pair.first; + bool IdxNIsKill = Pair.second; + if (!IdxN) // Unhandled operand. Halt "fast" selection and bail. return false; if (ElementSize != 1) { - IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT); - if (IdxN == 0) - // Unhandled operand. Halt "fast" selection and bail. + IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT); + if (!IdxN) // Unhandled operand. Halt "fast" selection and bail. return false; + IdxNIsKill = true; } - N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN); - if (N == 0) - // Unhandled operand. Halt "fast" selection and bail. + N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill); + if (!N) // Unhandled operand. Halt "fast" selection and bail. return false; } } + if (TotalOffs) { + N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT); + if (!N) // Unhandled operand. Halt "fast" selection and bail. + return false; + } // We successfully emitted code for the given LLVM Instruction. - UpdateValueMap(I, N); + updateValueMap(I, N); + return true; +} + +bool FastISel::addStackMapLiveVars(SmallVectorImpl &Ops, + const CallInst *CI, unsigned StartIdx) { + for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) { + Value *Val = CI->getArgOperand(i); + // Check for constants and encode them with a StackMaps::ConstantOp prefix. + if (const auto *C = dyn_cast(Val)) { + Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp)); + Ops.push_back(MachineOperand::CreateImm(C->getSExtValue())); + } else if (isa(Val)) { + Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp)); + Ops.push_back(MachineOperand::CreateImm(0)); + } else if (auto *AI = dyn_cast(Val)) { + // Values coming from a stack location also require a sepcial encoding, + // but that is added later on by the target specific frame index + // elimination implementation. 
+ auto SI = FuncInfo.StaticAllocaMap.find(AI); + if (SI != FuncInfo.StaticAllocaMap.end()) + Ops.push_back(MachineOperand::CreateFI(SI->second)); + else + return false; + } else { + unsigned Reg = getRegForValue(Val); + if (!Reg) + return false; + Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false)); + } + } + return true; +} + +bool FastISel::selectStackmap(const CallInst *I) { + // void @llvm.experimental.stackmap(i64 , i32 , + // [live variables...]) + assert(I->getCalledFunction()->getReturnType()->isVoidTy() && + "Stackmap cannot return a value."); + + // The stackmap intrinsic only records the live variables (the arguments + // passed to it) and emits NOPS (if requested). Unlike the patchpoint + // intrinsic, this won't be lowered to a function call. This means we don't + // have to worry about calling conventions and target-specific lowering code. + // Instead we perform the call lowering right here. + // + // CALLSEQ_START(0) + // STACKMAP(id, nbytes, ...) + // CALLSEQ_END(0, 0) + // + SmallVector Ops; + + // Add the and constants. + assert(isa(I->getOperand(PatchPointOpers::IDPos)) && + "Expected a constant integer."); + const auto *ID = cast(I->getOperand(PatchPointOpers::IDPos)); + Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue())); + + assert(isa(I->getOperand(PatchPointOpers::NBytesPos)) && + "Expected a constant integer."); + const auto *NumBytes = + cast(I->getOperand(PatchPointOpers::NBytesPos)); + Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue())); + + // Push live variables for the stack map (skipping the first two arguments + // and ). + if (!addStackMapLiveVars(Ops, I, 2)) + return false; + + // We are not adding any register mask info here, because the stackmap doesn't + // clobber anything. + + // Add scratch registers as implicit def and early clobber. + CallingConv::ID CC = I->getCallingConv(); + const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC); + for (unsigned i = 0; ScratchRegs[i]; ++i) + Ops.push_back(MachineOperand::CreateReg( + ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false, + /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true)); + + // Issue CALLSEQ_START + unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown)) + .addImm(0); + + // Issue STACKMAP. + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::STACKMAP)); + for (auto const &MO : Ops) + MIB.addOperand(MO); + + // Issue CALLSEQ_END + unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp)) + .addImm(0) + .addImm(0); + + // Inform the Frame Information that we have a stackmap in this function. + FuncInfo.MF->getFrameInfo()->setHasStackMap(); + return true; } -bool FastISel::SelectCall(const User *I) { - const Function *F = cast(I)->getCalledFunction(); - if (!F) return false; +/// \brief Lower an argument list according to the target calling convention. +/// +/// This is a helper for lowering intrinsics that follow a target calling +/// convention or require stack pointer adjustment. Only a subset of the +/// intrinsic's operands need to participate in the calling convention. +bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx, + unsigned NumArgs, const Value *Callee, + bool ForceRetVoidTy, CallLoweringInfo &CLI) { + ArgListTy Args; + Args.reserve(NumArgs); + + // Populate the argument list. 
+ // Attributes for args start at offset 1, after the return attribute. + ImmutableCallSite CS(CI); + for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1; + ArgI != ArgE; ++ArgI) { + Value *V = CI->getOperand(ArgI); + + assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic."); + + ArgListEntry Entry; + Entry.Val = V; + Entry.Ty = V->getType(); + Entry.setAttributes(&CS, AttrI); + Args.push_back(Entry); + } + + Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext()) + : CI->getType(); + CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs); + + return lowerCallTo(CLI); +} + +bool FastISel::selectPatchpoint(const CallInst *I) { + // void|i64 @llvm.experimental.patchpoint.void|i64(i64 , + // i32 , + // i8* , + // i32 , + // [Args...], + // [live variables...]) + CallingConv::ID CC = I->getCallingConv(); + bool IsAnyRegCC = CC == CallingConv::AnyReg; + bool HasDef = !I->getType()->isVoidTy(); + Value *Callee = I->getOperand(PatchPointOpers::TargetPos); + + // Get the real number of arguments participating in the call + assert(isa(I->getOperand(PatchPointOpers::NArgPos)) && + "Expected a constant integer."); + const auto *NumArgsVal = + cast(I->getOperand(PatchPointOpers::NArgPos)); + unsigned NumArgs = NumArgsVal->getZExtValue(); + + // Skip the four meta args: , , , + // This includes all meta-operands up to but not including CC. + unsigned NumMetaOpers = PatchPointOpers::CCPos; + assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs && + "Not enough arguments provided to the patchpoint intrinsic"); + + // For AnyRegCC the arguments are lowered later on manually. + unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs; + CallLoweringInfo CLI; + if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI)) + return false; + + assert(CLI.Call && "No call instruction specified."); + + SmallVector Ops; + + // Add an explicit result reg if we use the anyreg calling convention. + if (IsAnyRegCC && HasDef) { + assert(CLI.NumResultRegs == 0 && "Unexpected result register."); + CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64)); + CLI.NumResultRegs = 1; + Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true)); + } + + // Add the and constants. + assert(isa(I->getOperand(PatchPointOpers::IDPos)) && + "Expected a constant integer."); + const auto *ID = cast(I->getOperand(PatchPointOpers::IDPos)); + Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue())); + + assert(isa(I->getOperand(PatchPointOpers::NBytesPos)) && + "Expected a constant integer."); + const auto *NumBytes = + cast(I->getOperand(PatchPointOpers::NBytesPos)); + Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue())); + + // Assume that the callee is a constant address or null pointer. + // FIXME: handle function symbols in the future. + uint64_t CalleeAddr; + if (const auto *C = dyn_cast(Callee)) + CalleeAddr = cast(C->getOperand(0))->getZExtValue(); + else if (const auto *C = dyn_cast(Callee)) { + if (C->getOpcode() == Instruction::IntToPtr) + CalleeAddr = cast(C->getOperand(0))->getZExtValue(); + else + llvm_unreachable("Unsupported ConstantExpr."); + } else if (isa(Callee)) + CalleeAddr = 0; + else + llvm_unreachable("Unsupported callee address."); + + Ops.push_back(MachineOperand::CreateImm(CalleeAddr)); + + // Adjust to account for any arguments that have been passed on + // the stack instead. + unsigned NumCallRegArgs = IsAnyRegCC ? 
NumArgs : CLI.OutRegs.size(); + Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs)); + + // Add the calling convention + Ops.push_back(MachineOperand::CreateImm((unsigned)CC)); + + // Add the arguments we omitted previously. The register allocator should + // place these in any free register. + if (IsAnyRegCC) { + for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) { + unsigned Reg = getRegForValue(I->getArgOperand(i)); + if (!Reg) + return false; + Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false)); + } + } + + // Push the arguments from the call instruction. + for (auto Reg : CLI.OutRegs) + Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false)); + + // Push live variables for the stack map. + if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs)) + return false; + + // Push the register mask info. + Ops.push_back(MachineOperand::CreateRegMask(TRI.getCallPreservedMask(CC))); + + // Add scratch registers as implicit def and early clobber. + const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC); + for (unsigned i = 0; ScratchRegs[i]; ++i) + Ops.push_back(MachineOperand::CreateReg( + ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false, + /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true)); + + // Add implicit defs (return values). + for (auto Reg : CLI.InRegs) + Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true, + /*IsImpl=*/true)); + + // Insert the patchpoint instruction before the call generated by the target. + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc, + TII.get(TargetOpcode::PATCHPOINT)); + + for (auto &MO : Ops) + MIB.addOperand(MO); + + MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI); + + // Delete the original call instruction. + CLI.Call->eraseFromParent(); + + // Inform the Frame Information that we have a patchpoint in this function. + FuncInfo.MF->getFrameInfo()->setHasPatchPoint(); + + if (CLI.NumResultRegs) + updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs); + return true; +} + +/// Returns an AttributeSet representing the attributes applied to the return +/// value of the given call. +static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) { + SmallVector Attrs; + if (CLI.RetSExt) + Attrs.push_back(Attribute::SExt); + if (CLI.RetZExt) + Attrs.push_back(Attribute::ZExt); + if (CLI.IsInReg) + Attrs.push_back(Attribute::InReg); + + return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex, + Attrs); +} + +bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName, + unsigned NumArgs) { + ImmutableCallSite CS(CI); + + PointerType *PT = cast(CS.getCalledValue()->getType()); + FunctionType *FTy = cast(PT->getElementType()); + Type *RetTy = FTy->getReturnType(); + + ArgListTy Args; + Args.reserve(NumArgs); + + // Populate the argument list. + // Attributes for args start at offset 1, after the return attribute. + for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) { + Value *V = CI->getOperand(ArgI); + + assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic."); + + ArgListEntry Entry; + Entry.Val = V; + Entry.Ty = V->getType(); + Entry.setAttributes(&CS, ArgI + 1); + Args.push_back(Entry); + } + + CallLoweringInfo CLI; + CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs); + + return lowerCallTo(CLI); +} + +bool FastISel::lowerCallTo(CallLoweringInfo &CLI) { + // Handle the incoming return values from the call. 
+ CLI.clearIns(); + SmallVector RetTys; + ComputeValueVTs(TLI, CLI.RetTy, RetTys); + + SmallVector Outs; + GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI); + + bool CanLowerReturn = TLI.CanLowerReturn( + CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext()); + + // FIXME: sret demotion isn't supported yet - bail out. + if (!CanLowerReturn) + return false; + + for (unsigned I = 0, E = RetTys.size(); I != E; ++I) { + EVT VT = RetTys[I]; + MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT); + unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT); + for (unsigned i = 0; i != NumRegs; ++i) { + ISD::InputArg MyFlags; + MyFlags.VT = RegisterVT; + MyFlags.ArgVT = VT; + MyFlags.Used = CLI.IsReturnValueUsed; + if (CLI.RetSExt) + MyFlags.Flags.setSExt(); + if (CLI.RetZExt) + MyFlags.Flags.setZExt(); + if (CLI.IsInReg) + MyFlags.Flags.setInReg(); + CLI.Ins.push_back(MyFlags); + } + } + + // Handle all of the outgoing arguments. + CLI.clearOuts(); + for (auto &Arg : CLI.getArgs()) { + Type *FinalType = Arg.Ty; + if (Arg.IsByVal) + FinalType = cast(Arg.Ty)->getElementType(); + bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters( + FinalType, CLI.CallConv, CLI.IsVarArg); + + ISD::ArgFlagsTy Flags; + if (Arg.IsZExt) + Flags.setZExt(); + if (Arg.IsSExt) + Flags.setSExt(); + if (Arg.IsInReg) + Flags.setInReg(); + if (Arg.IsSRet) + Flags.setSRet(); + if (Arg.IsByVal) + Flags.setByVal(); + if (Arg.IsInAlloca) { + Flags.setInAlloca(); + // Set the byval flag for CCAssignFn callbacks that don't know about + // inalloca. This way we can know how many bytes we should've allocated + // and how many bytes a callee cleanup function will pop. If we port + // inalloca to more targets, we'll have to add custom inalloca handling in + // the various CC lowering callbacks. + Flags.setByVal(); + } + if (Arg.IsByVal || Arg.IsInAlloca) { + PointerType *Ty = cast(Arg.Ty); + Type *ElementTy = Ty->getElementType(); + unsigned FrameSize = DL.getTypeAllocSize(ElementTy); + // For ByVal, alignment should come from FE. BE will guess if this info is + // not there, but there are cases it cannot get right. + unsigned FrameAlign = Arg.Alignment; + if (!FrameAlign) + FrameAlign = TLI.getByValTypeAlignment(ElementTy); + Flags.setByValSize(FrameSize); + Flags.setByValAlign(FrameAlign); + } + if (Arg.IsNest) + Flags.setNest(); + if (NeedsRegBlock) + Flags.setInConsecutiveRegs(); + unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty); + Flags.setOrigAlign(OriginalAlignment); + + CLI.OutVals.push_back(Arg.Val); + CLI.OutFlags.push_back(Flags); + } + + if (!fastLowerCall(CLI)) + return false; + + // Set all unused physreg defs as dead. + assert(CLI.Call && "No call instruction specified."); + CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI); + + if (CLI.NumResultRegs && CLI.CS) + updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs); + + return true; +} + +bool FastISel::lowerCall(const CallInst *CI) { + ImmutableCallSite CS(CI); + + PointerType *PT = cast(CS.getCalledValue()->getType()); + FunctionType *FuncTy = cast(PT->getElementType()); + Type *RetTy = FuncTy->getReturnType(); + + ArgListTy Args; + ArgListEntry Entry; + Args.reserve(CS.arg_size()); + + for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); + i != e; ++i) { + Value *V = *i; + + // Skip empty types + if (V->getType()->isEmptyTy()) + continue; + + Entry.Val = V; + Entry.Ty = V->getType(); + + // Skip the first return-type Attribute to get to params. 
+ Entry.setAttributes(&CS, i - CS.arg_begin() + 1); + Args.push_back(Entry); + } + + // Check if target-independent constraints permit a tail call here. + // Target-dependent constraints are checked within fastLowerCall. + bool IsTailCall = CI->isTailCall(); + if (IsTailCall && !isInTailCallPosition(CS, TM)) + IsTailCall = false; + + CallLoweringInfo CLI; + CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS) + .setTailCall(IsTailCall); + + return lowerCallTo(CLI); +} + +bool FastISel::selectCall(const User *I) { + const CallInst *Call = cast(I); + + // Handle simple inline asms. + if (const InlineAsm *IA = dyn_cast(Call->getCalledValue())) { + // If the inline asm has side effects, then make sure that no local value + // lives across by flushing the local value map. + if (IA->hasSideEffects()) + flushLocalValueMap(); + + // Don't attempt to handle constraints. + if (!IA->getConstraintString().empty()) + return false; + + unsigned ExtraInfo = 0; + if (IA->hasSideEffects()) + ExtraInfo |= InlineAsm::Extra_HasSideEffects; + if (IA->isAlignStack()) + ExtraInfo |= InlineAsm::Extra_IsAlignStack; - // Handle selected intrinsic function calls. - unsigned IID = F->getIntrinsicID(); - switch (IID) { - default: break; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::INLINEASM)) + .addExternalSymbol(IA->getAsmString().c_str()) + .addImm(ExtraInfo); + return true; + } + + MachineModuleInfo &MMI = FuncInfo.MF->getMMI(); + ComputeUsesVAFloatArgument(*Call, &MMI); + + // Handle intrinsic function calls. + if (const auto *II = dyn_cast(Call)) + return selectIntrinsicCall(II); + + // Usually, it does not make sense to initialize a value, + // make an unrelated function call and use the value, because + // it tends to be spilled on the stack. So, we move the pointer + // to the last local value to the beginning of the block, so that + // all the values which have already been materialized, + // appear after the call. It also makes sense to skip intrinsics + // since they tend to be inlined. + flushLocalValueMap(); + + return lowerCall(Call); +} + +bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) { + switch (II->getIntrinsicID()) { + default: + break; + // At -O0 we don't care about the lifetime intrinsics. + case Intrinsic::lifetime_start: + case Intrinsic::lifetime_end: + // The donothing intrinsic does, well, nothing. + case Intrinsic::donothing: + return true; case Intrinsic::dbg_declare: { - const DbgDeclareInst *DI = cast(I); - if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None) || - !MF.getMMI().hasDebugInfo()) + const DbgDeclareInst *DI = cast(II); + DIVariable DIVar(DI->getVariable()); + assert((!DIVar || DIVar.isVariable()) && + "Variable in DbgDeclareInst should be either null or a DIVariable."); + if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) { + DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); return true; + } const Value *Address = DI->getAddress(); - if (!Address) + if (!Address || isa(Address)) { + DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); return true; - if (isa(Address)) - return true; - const AllocaInst *AI = dyn_cast(Address); - // Don't handle byval struct arguments or VLAs, for example. - if (!AI) break; - DenseMap::iterator SI = - StaticAllocaMap.find(AI); - if (SI == StaticAllocaMap.end()) break; // VLAs. 
- int FI = SI->second; - if (!DI->getDebugLoc().isUnknown()) - MF.getMMI().setVariableDbgInfo(DI->getVariable(), FI, DI->getDebugLoc()); - - // Building the map above is target independent. Generating DBG_VALUE - // inline is target dependent; do this now. - (void)TargetSelectInstruction(cast(I)); + } + + unsigned Offset = 0; + Optional Op; + if (const auto *Arg = dyn_cast(Address)) + // Some arguments' frame index is recorded during argument lowering. + Offset = FuncInfo.getArgumentFrameIndex(Arg); + if (Offset) + Op = MachineOperand::CreateFI(Offset); + if (!Op) + if (unsigned Reg = lookUpRegForValue(Address)) + Op = MachineOperand::CreateReg(Reg, false); + + // If we have a VLA that has a "use" in a metadata node that's then used + // here but it has no other uses, then we have a problem. E.g., + // + // int foo (const int *x) { + // char a[*x]; + // return 0; + // } + // + // If we assign 'a' a vreg and fast isel later on has to use the selection + // DAG isel, it will want to copy the value to the vreg. However, there are + // no uses, which goes counter to what selection DAG isel expects. + if (!Op && !Address->use_empty() && isa(Address) && + (!isa(Address) || + !FuncInfo.StaticAllocaMap.count(cast(Address)))) + Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address), + false); + + if (Op) { + if (Op->isReg()) { + Op->setIsDebug(true); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0, + DI->getVariable(), DI->getExpression()); + } else + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::DBG_VALUE)) + .addOperand(*Op) + .addImm(0) + .addMetadata(DI->getVariable()) + .addMetadata(DI->getExpression()); + } else { + // We can't yet handle anything else here because it would require + // generating code, thus altering codegen because of debug info. + DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); + } return true; } case Intrinsic::dbg_value: { // This form of DBG_VALUE is target-independent. - const DbgValueInst *DI = cast(I); - const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); + const DbgValueInst *DI = cast(II); + const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE); const Value *V = DI->getValue(); if (!V) { // Currently the optimizer can produce this; insert an undef to // help debugging. Probably the optimizer should not do this. - BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()). - addMetadata(DI->getVariable()); - } else if (const ConstantInt *CI = dyn_cast(V)) { - BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()). - addMetadata(DI->getVariable()); - } else if (const ConstantFP *CF = dyn_cast(V)) { - BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()). 
- addMetadata(DI->getVariable()); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(0U) + .addImm(DI->getOffset()) + .addMetadata(DI->getVariable()) + .addMetadata(DI->getExpression()); + } else if (const auto *CI = dyn_cast(V)) { + if (CI->getBitWidth() > 64) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addCImm(CI) + .addImm(DI->getOffset()) + .addMetadata(DI->getVariable()) + .addMetadata(DI->getExpression()); + else + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addImm(CI->getZExtValue()) + .addImm(DI->getOffset()) + .addMetadata(DI->getVariable()) + .addMetadata(DI->getExpression()); + } else if (const auto *CF = dyn_cast(V)) { + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addFPImm(CF) + .addImm(DI->getOffset()) + .addMetadata(DI->getVariable()) + .addMetadata(DI->getExpression()); } else if (unsigned Reg = lookUpRegForValue(V)) { - BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()). - addMetadata(DI->getVariable()); + // FIXME: This does not handle register-indirect values at offset 0. + bool IsIndirect = DI->getOffset() != 0; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg, + DI->getOffset(), DI->getVariable(), DI->getExpression()); } else { // We can't yet handle anything else here because it would require // generating code, thus altering codegen because of debug info. - // Insert an undef so we can see what we dropped. - BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()). - addMetadata(DI->getVariable()); - } + DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n"); + } return true; } - case Intrinsic::eh_exception: { - EVT VT = TLI.getValueType(I->getType()); - switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) { - default: break; - case TargetLowering::Expand: { - assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!"); - unsigned Reg = TLI.getExceptionAddressRegister(); - const TargetRegisterClass *RC = TLI.getRegClassFor(VT); - unsigned ResultReg = createResultReg(RC); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - Reg, RC, RC); - assert(InsertedCopy && "Can't copy address registers!"); - InsertedCopy = InsertedCopy; - UpdateValueMap(I, ResultReg); - return true; - } - } - break; + case Intrinsic::objectsize: { + ConstantInt *CI = cast(II->getArgOperand(1)); + unsigned long long Res = CI->isZero() ? -1ULL : 0; + Constant *ResCI = ConstantInt::get(II->getType(), Res); + unsigned ResultReg = getRegForValue(ResCI); + if (!ResultReg) + return false; + updateValueMap(II, ResultReg); + return true; } - case Intrinsic::eh_selector: { - EVT VT = TLI.getValueType(I->getType()); - switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) { - default: break; - case TargetLowering::Expand: { - if (MBB->isLandingPad()) - AddCatchInfo(*cast(I), &MF.getMMI(), MBB); - else { -#ifndef NDEBUG - CatchInfoLost.insert(cast(I)); -#endif - // FIXME: Mark exception selector register as live in. Hack for PR1508. - unsigned Reg = TLI.getExceptionSelectorRegister(); - if (Reg) MBB->addLiveIn(Reg); - } - - unsigned Reg = TLI.getExceptionSelectorRegister(); - EVT SrcVT = TLI.getPointerTy(); - const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT); - unsigned ResultReg = createResultReg(RC); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg, - RC, RC); - assert(InsertedCopy && "Can't copy address registers!"); - InsertedCopy = InsertedCopy; - - // Cast the register to the type of the selector. 
- if (SrcVT.bitsGT(MVT::i32)) - ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE, - ResultReg); - else if (SrcVT.bitsLT(MVT::i32)) - ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, - ISD::SIGN_EXTEND, ResultReg); - if (ResultReg == 0) - // Unhandled operand. Halt "fast" selection and bail. - return false; - - UpdateValueMap(I, ResultReg); - - return true; - } - } - break; + case Intrinsic::expect: { + unsigned ResultReg = getRegForValue(II->getArgOperand(0)); + if (!ResultReg) + return false; + updateValueMap(II, ResultReg); + return true; } + case Intrinsic::experimental_stackmap: + return selectStackmap(II); + case Intrinsic::experimental_patchpoint_void: + case Intrinsic::experimental_patchpoint_i64: + return selectPatchpoint(II); } - // An arbitrary call. Bail. - return false; + return fastLowerIntrinsicCall(II); } -bool FastISel::SelectCast(const User *I, unsigned Opcode) { +bool FastISel::selectCast(const User *I, unsigned Opcode) { EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); EVT DstVT = TLI.getValueType(I->getType()); - - if (SrcVT == MVT::Other || !SrcVT.isSimple() || - DstVT == MVT::Other || !DstVT.isSimple()) + + if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other || + !DstVT.isSimple()) // Unhandled type. Halt "fast" selection and bail. return false; - - // Check if the destination type is legal. Or as a special case, - // it may be i1 if we're doing a truncate because that's - // easy and somewhat common. + + // Check if the destination type is legal. if (!TLI.isTypeLegal(DstVT)) - if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE) - // Unhandled type. Halt "fast" selection and bail. - return false; + return false; - // Check if the source operand is legal. Or as a special case, - // it may be i1 if we're doing zero-extension because that's - // easy and somewhat common. + // Check if the source operand is legal. if (!TLI.isTypeLegal(SrcVT)) - if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND) - // Unhandled type. Halt "fast" selection and bail. - return false; + return false; unsigned InputReg = getRegForValue(I->getOperand(0)); if (!InputReg) // Unhandled operand. Halt "fast" selection and bail. return false; - // If the operand is i1, arrange for the high bits in the register to be zero. - if (SrcVT == MVT::i1) { - SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT); - InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg); - if (!InputReg) - return false; - } - // If the result is i1, truncate to the target's type for i1 first. - if (DstVT == MVT::i1) - DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT); - - unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(), - DstVT.getSimpleVT(), - Opcode, - InputReg); + bool InputRegIsKill = hasTrivialKill(I->getOperand(0)); + + unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), + Opcode, InputReg, InputRegIsKill); if (!ResultReg) return false; - - UpdateValueMap(I, ResultReg); + + updateValueMap(I, ResultReg); return true; } -bool FastISel::SelectBitCast(const User *I) { +bool FastISel::selectBitCast(const User *I) { // If the bitcast doesn't change the type, just use the operand value. if (I->getType() == I->getOperand(0)->getType()) { unsigned Reg = getRegForValue(I->getOperand(0)); - if (Reg == 0) + if (!Reg) return false; - UpdateValueMap(I, Reg); + updateValueMap(I, Reg); return true; } - // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators. 
- EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); - EVT DstVT = TLI.getValueType(I->getType()); - - if (SrcVT == MVT::Other || !SrcVT.isSimple() || - DstVT == MVT::Other || !DstVT.isSimple() || - !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT)) + // Bitcasts of other values become reg-reg copies or BITCAST operators. + EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType()); + EVT DstEVT = TLI.getValueType(I->getType()); + if (SrcEVT == MVT::Other || DstEVT == MVT::Other || + !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT)) // Unhandled type. Halt "fast" selection and bail. return false; - + + MVT SrcVT = SrcEVT.getSimpleVT(); + MVT DstVT = DstEVT.getSimpleVT(); unsigned Op0 = getRegForValue(I->getOperand(0)); - if (Op0 == 0) - // Unhandled operand. Halt "fast" selection and bail. + if (!Op0) // Unhandled operand. Halt "fast" selection and bail. return false; - + bool Op0IsKill = hasTrivialKill(I->getOperand(0)); + // First, try to perform the bitcast by inserting a reg-reg copy. unsigned ResultReg = 0; - if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) { - TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT); - TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT); - ResultReg = createResultReg(DstClass); - - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - Op0, DstClass, SrcClass); - if (!InsertedCopy) - ResultReg = 0; + if (SrcVT == DstVT) { + const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT); + const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT); + // Don't attempt a cross-class copy. It will likely fail. + if (SrcClass == DstClass) { + ResultReg = createResultReg(DstClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0); + } } - - // If the reg-reg copy failed, select a BIT_CONVERT opcode. + + // If the reg-reg copy failed, select a BITCAST opcode. if (!ResultReg) - ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), - ISD::BIT_CONVERT, Op0); - + ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill); + if (!ResultReg) return false; - - UpdateValueMap(I, ResultReg); + + updateValueMap(I, ResultReg); return true; } -bool -FastISel::SelectInstruction(const Instruction *I) { - DL = I->getDebugLoc(); +bool FastISel::selectInstruction(const Instruction *I) { + // Just before the terminator instruction, insert instructions to + // feed PHI nodes in successor blocks. + if (isa(I)) + if (!handlePHINodesInSuccessorBlocks(I->getParent())) + return false; - // First, try doing target-independent selection. - if (SelectOperator(I, I->getOpcode())) { - DL = DebugLoc(); - return true; + DbgLoc = I->getDebugLoc(); + + SavedInsertPt = FuncInfo.InsertPt; + + if (const auto *Call = dyn_cast(I)) { + const Function *F = Call->getCalledFunction(); + LibFunc::Func Func; + + // As a special case, don't handle calls to builtin library functions that + // may be translated directly to target instructions. + if (F && !F->hasLocalLinkage() && F->hasName() && + LibInfo->getLibFunc(F->getName(), Func) && + LibInfo->hasOptimizedCodeGen(Func)) + return false; + + // Don't handle Intrinsic::trap if a trap funciton is specified. + if (F && F->getIntrinsicID() == Intrinsic::trap && + !TM.Options.getTrapFunctionName().empty()) + return false; } + // First, try doing target-independent selection. 
+ if (!SkipTargetIndependentISel) { + if (selectOperator(I, I->getOpcode())) { + ++NumFastIselSuccessIndependent; + DbgLoc = DebugLoc(); + return true; + } + // Remove dead code. + recomputeInsertPt(); + if (SavedInsertPt != FuncInfo.InsertPt) + removeDeadCode(FuncInfo.InsertPt, SavedInsertPt); + SavedInsertPt = FuncInfo.InsertPt; + } // Next, try calling the target to attempt to handle the instruction. - if (TargetSelectInstruction(I)) { - DL = DebugLoc(); + if (fastSelectInstruction(I)) { + ++NumFastIselSuccessTarget; + DbgLoc = DebugLoc(); return true; } - - DL = DebugLoc(); + // Remove dead code. + recomputeInsertPt(); + if (SavedInsertPt != FuncInfo.InsertPt) + removeDeadCode(FuncInfo.InsertPt, SavedInsertPt); + + DbgLoc = DebugLoc(); + // Undo phi node updates, because they will be added again by SelectionDAG. + if (isa(I)) + FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); return false; } -/// FastEmitBranch - Emit an unconditional branch to the given block, -/// unless it is the immediate (fall-through) successor, and update -/// the CFG. -void -FastISel::FastEmitBranch(MachineBasicBlock *MSucc) { - if (MBB->isLayoutSuccessor(MSucc)) { - // The unconditional fall-through case, which needs no instructions. +/// Emit an unconditional branch to the given block, unless it is the immediate +/// (fall-through) successor, and update the CFG. +void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) { + if (FuncInfo.MBB->getBasicBlock()->size() > 1 && + FuncInfo.MBB->isLayoutSuccessor(MSucc)) { + // For more accurate line information if this is the only instruction + // in the block then emit it, otherwise we have the unconditional + // fall-through case, which needs no instructions. } else { // The unconditional branch case. - TII.InsertBranch(*MBB, MSucc, NULL, SmallVector()); + TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr, + SmallVector(), DbgLoc); } - MBB->addSuccessor(MSucc); + uint32_t BranchWeight = 0; + if (FuncInfo.BPI) + BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(), + MSucc->getBasicBlock()); + FuncInfo.MBB->addSuccessor(MSucc, BranchWeight); } -/// SelectFNeg - Emit an FNeg operation. -/// -bool -FastISel::SelectFNeg(const User *I) { +/// Emit an FNeg operation. +bool FastISel::selectFNeg(const User *I) { unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I)); - if (OpReg == 0) return false; + if (!OpReg) + return false; + bool OpRegIsKill = hasTrivialKill(I); // If the target has ISD::FNEG, use it. EVT VT = TLI.getValueType(I->getType()); - unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), - ISD::FNEG, OpReg); - if (ResultReg != 0) { - UpdateValueMap(I, ResultReg); + unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG, + OpReg, OpRegIsKill); + if (ResultReg) { + updateValueMap(I, ResultReg); return true; } // Bitcast the value to integer, twiddle the sign bit with xor, // and then bitcast it back to floating-point. 
- if (VT.getSizeInBits() > 64) return false; + if (VT.getSizeInBits() > 64) + return false; EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits()); if (!TLI.isTypeLegal(IntVT)) return false; - unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(), - ISD::BIT_CONVERT, OpReg); - if (IntReg == 0) + unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(), + ISD::BITCAST, OpReg, OpRegIsKill); + if (!IntReg) return false; - unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR, IntReg, - UINT64_C(1) << (VT.getSizeInBits()-1), - IntVT.getSimpleVT()); - if (IntResultReg == 0) + unsigned IntResultReg = fastEmit_ri_( + IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true, + UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT()); + if (!IntResultReg) return false; - ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), - ISD::BIT_CONVERT, IntResultReg); - if (ResultReg == 0) + ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST, + IntResultReg, /*IsKill=*/true); + if (!ResultReg) return false; - UpdateValueMap(I, ResultReg); + updateValueMap(I, ResultReg); return true; } -bool -FastISel::SelectOperator(const User *I, unsigned Opcode) { +bool FastISel::selectExtractValue(const User *U) { + const ExtractValueInst *EVI = dyn_cast(U); + if (!EVI) + return false; + + // Make sure we only try to handle extracts with a legal result. But also + // allow i1 because it's easy. + EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true); + if (!RealVT.isSimple()) + return false; + MVT VT = RealVT.getSimpleVT(); + if (!TLI.isTypeLegal(VT) && VT != MVT::i1) + return false; + + const Value *Op0 = EVI->getOperand(0); + Type *AggTy = Op0->getType(); + + // Get the base result register. + unsigned ResultReg; + DenseMap::iterator I = FuncInfo.ValueMap.find(Op0); + if (I != FuncInfo.ValueMap.end()) + ResultReg = I->second; + else if (isa(Op0)) + ResultReg = FuncInfo.InitializeRegForValue(Op0); + else + return false; // fast-isel can't handle aggregate constants at the moment + + // Get the actual result register, which is an offset from the base register. + unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices()); + + SmallVector AggValueVTs; + ComputeValueVTs(TLI, AggTy, AggValueVTs); + + for (unsigned i = 0; i < VTIndex; i++) + ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]); + + updateValueMap(EVI, ResultReg); + return true; +} + +bool FastISel::selectOperator(const User *I, unsigned Opcode) { switch (Opcode) { case Instruction::Add: - return SelectBinaryOp(I, ISD::ADD); + return selectBinaryOp(I, ISD::ADD); case Instruction::FAdd: - return SelectBinaryOp(I, ISD::FADD); + return selectBinaryOp(I, ISD::FADD); case Instruction::Sub: - return SelectBinaryOp(I, ISD::SUB); + return selectBinaryOp(I, ISD::SUB); case Instruction::FSub: // FNeg is currently represented in LLVM IR as a special case of FSub. 
if (BinaryOperator::isFNeg(I)) - return SelectFNeg(I); - return SelectBinaryOp(I, ISD::FSUB); + return selectFNeg(I); + return selectBinaryOp(I, ISD::FSUB); case Instruction::Mul: - return SelectBinaryOp(I, ISD::MUL); + return selectBinaryOp(I, ISD::MUL); case Instruction::FMul: - return SelectBinaryOp(I, ISD::FMUL); + return selectBinaryOp(I, ISD::FMUL); case Instruction::SDiv: - return SelectBinaryOp(I, ISD::SDIV); + return selectBinaryOp(I, ISD::SDIV); case Instruction::UDiv: - return SelectBinaryOp(I, ISD::UDIV); + return selectBinaryOp(I, ISD::UDIV); case Instruction::FDiv: - return SelectBinaryOp(I, ISD::FDIV); + return selectBinaryOp(I, ISD::FDIV); case Instruction::SRem: - return SelectBinaryOp(I, ISD::SREM); + return selectBinaryOp(I, ISD::SREM); case Instruction::URem: - return SelectBinaryOp(I, ISD::UREM); + return selectBinaryOp(I, ISD::UREM); case Instruction::FRem: - return SelectBinaryOp(I, ISD::FREM); + return selectBinaryOp(I, ISD::FREM); case Instruction::Shl: - return SelectBinaryOp(I, ISD::SHL); + return selectBinaryOp(I, ISD::SHL); case Instruction::LShr: - return SelectBinaryOp(I, ISD::SRL); + return selectBinaryOp(I, ISD::SRL); case Instruction::AShr: - return SelectBinaryOp(I, ISD::SRA); + return selectBinaryOp(I, ISD::SRA); case Instruction::And: - return SelectBinaryOp(I, ISD::AND); + return selectBinaryOp(I, ISD::AND); case Instruction::Or: - return SelectBinaryOp(I, ISD::OR); + return selectBinaryOp(I, ISD::OR); case Instruction::Xor: - return SelectBinaryOp(I, ISD::XOR); + return selectBinaryOp(I, ISD::XOR); case Instruction::GetElementPtr: - return SelectGetElementPtr(I); + return selectGetElementPtr(I); case Instruction::Br: { const BranchInst *BI = cast(I); if (BI->isUnconditional()) { const BasicBlock *LLVMSucc = BI->getSuccessor(0); - MachineBasicBlock *MSucc = MBBMap[LLVMSucc]; - FastEmitBranch(MSucc); + MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc]; + fastEmitBranch(MSucc, BI->getDebugLoc()); return true; } @@ -690,48 +1517,54 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) { } case Instruction::Unreachable: - // Nothing to emit. - return true; + if (TM.Options.TrapUnreachable) + return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0; + else + return true; case Instruction::Alloca: // FunctionLowering has the static-sized case covered. - if (StaticAllocaMap.count(cast(I))) + if (FuncInfo.StaticAllocaMap.count(cast(I))) return true; // Dynamic-sized alloca is not handled yet. return false; - + case Instruction::Call: - return SelectCall(I); - + return selectCall(I); + case Instruction::BitCast: - return SelectBitCast(I); + return selectBitCast(I); case Instruction::FPToSI: - return SelectCast(I, ISD::FP_TO_SINT); + return selectCast(I, ISD::FP_TO_SINT); case Instruction::ZExt: - return SelectCast(I, ISD::ZERO_EXTEND); + return selectCast(I, ISD::ZERO_EXTEND); case Instruction::SExt: - return SelectCast(I, ISD::SIGN_EXTEND); + return selectCast(I, ISD::SIGN_EXTEND); case Instruction::Trunc: - return SelectCast(I, ISD::TRUNCATE); + return selectCast(I, ISD::TRUNCATE); case Instruction::SIToFP: - return SelectCast(I, ISD::SINT_TO_FP); + return selectCast(I, ISD::SINT_TO_FP); case Instruction::IntToPtr: // Deliberate fall-through. 
case Instruction::PtrToInt: { EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); EVT DstVT = TLI.getValueType(I->getType()); if (DstVT.bitsGT(SrcVT)) - return SelectCast(I, ISD::ZERO_EXTEND); + return selectCast(I, ISD::ZERO_EXTEND); if (DstVT.bitsLT(SrcVT)) - return SelectCast(I, ISD::TRUNCATE); + return selectCast(I, ISD::TRUNCATE); unsigned Reg = getRegForValue(I->getOperand(0)); - if (Reg == 0) return false; - UpdateValueMap(I, Reg); + if (!Reg) + return false; + updateValueMap(I, Reg); return true; } + case Instruction::ExtractValue: + return selectExtractValue(I); + case Instruction::PHI: llvm_unreachable("FastISel shouldn't visit PHI nodes!"); @@ -741,286 +1574,384 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) { } } -FastISel::FastISel(MachineFunction &mf, - DenseMap &vm, - DenseMap &bm, - DenseMap &am, - std::vector > &pn -#ifndef NDEBUG - , SmallSet &cil -#endif - ) - : MBB(0), - ValueMap(vm), - MBBMap(bm), - StaticAllocaMap(am), - PHINodesToUpdate(pn), -#ifndef NDEBUG - CatchInfoLost(cil), -#endif - MF(mf), - MRI(MF.getRegInfo()), - MFI(*MF.getFrameInfo()), - MCP(*MF.getConstantPool()), - TM(MF.getTarget()), - TD(*TM.getTargetData()), - TII(*TM.getInstrInfo()), - TLI(*TM.getTargetLowering()) { -} +FastISel::FastISel(FunctionLoweringInfo &FuncInfo, + const TargetLibraryInfo *LibInfo, + bool SkipTargetIndependentISel) + : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()), + MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()), + TM(FuncInfo.MF->getTarget()), DL(*TM.getSubtargetImpl()->getDataLayout()), + TII(*TM.getSubtargetImpl()->getInstrInfo()), + TLI(*TM.getSubtargetImpl()->getTargetLowering()), + TRI(*TM.getSubtargetImpl()->getRegisterInfo()), LibInfo(LibInfo), + SkipTargetIndependentISel(SkipTargetIndependentISel) {} FastISel::~FastISel() {} -unsigned FastISel::FastEmit_(MVT, MVT, - unsigned) { - return 0; +bool FastISel::fastLowerArguments() { return false; } + +bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; } + +bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) { + return false; } -unsigned FastISel::FastEmit_r(MVT, MVT, - unsigned, unsigned /*Op0*/) { +unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; } + +unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/, + bool /*Op0IsKill*/) { return 0; } -unsigned FastISel::FastEmit_rr(MVT, MVT, - unsigned, unsigned /*Op0*/, - unsigned /*Op0*/) { +unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/, + bool /*Op0IsKill*/, unsigned /*Op1*/, + bool /*Op1IsKill*/) { return 0; } -unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) { +unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) { return 0; } -unsigned FastISel::FastEmit_f(MVT, MVT, - unsigned, const ConstantFP * /*FPImm*/) { +unsigned FastISel::fastEmit_f(MVT, MVT, unsigned, + const ConstantFP * /*FPImm*/) { return 0; } -unsigned FastISel::FastEmit_ri(MVT, MVT, - unsigned, unsigned /*Op0*/, - uint64_t /*Imm*/) { +unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/, + bool /*Op0IsKill*/, uint64_t /*Imm*/) { return 0; } -unsigned FastISel::FastEmit_rf(MVT, MVT, - unsigned, unsigned /*Op0*/, +unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/, + bool /*Op0IsKill*/, const ConstantFP * /*FPImm*/) { return 0; } -unsigned FastISel::FastEmit_rri(MVT, MVT, - unsigned, - unsigned /*Op0*/, unsigned /*Op1*/, - uint64_t /*Imm*/) { +unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, 
unsigned /*Op0*/, + bool /*Op0IsKill*/, unsigned /*Op1*/, + bool /*Op1IsKill*/, uint64_t /*Imm*/) { return 0; } -/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries -/// to emit an instruction with an immediate operand using FastEmit_ri. +/// This method is a wrapper of fastEmit_ri. It first tries to emit an +/// instruction with an immediate operand using fastEmit_ri. /// If that fails, it materializes the immediate into a register and try -/// FastEmit_rr instead. -unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode, - unsigned Op0, uint64_t Imm, - MVT ImmType) { - // First check if immediate type is legal. If not, we can't use the ri form. - unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm); - if (ResultReg != 0) - return ResultReg; - unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm); - if (MaterialReg == 0) +/// fastEmit_rr instead. +unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, + bool Op0IsKill, uint64_t Imm, MVT ImmType) { + // If this is a multiply by a power of two, emit this as a shift left. + if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) { + Opcode = ISD::SHL; + Imm = Log2_64(Imm); + } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) { + // div x, 8 -> srl x, 3 + Opcode = ISD::SRL; + Imm = Log2_64(Imm); + } + + // Horrible hack (to be removed), check to make sure shift amounts are + // in-range. + if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) && + Imm >= VT.getSizeInBits()) return 0; - return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg); -} -/// FastEmit_rf_ - This method is a wrapper of FastEmit_ri. It first tries -/// to emit an instruction with a floating-point immediate operand using -/// FastEmit_rf. If that fails, it materializes the immediate into a register -/// and try FastEmit_rr instead. -unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode, - unsigned Op0, const ConstantFP *FPImm, - MVT ImmType) { - // First check if immediate type is legal. If not, we can't use the rf form. - unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm); - if (ResultReg != 0) + // First check if immediate type is legal. If not, we can't use the ri form. + unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm); + if (ResultReg) return ResultReg; - - // Materialize the constant in a register. - unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm); - if (MaterialReg == 0) { - // If the target doesn't have a way to directly enter a floating-point - // value into a register, use an alternate approach. - // TODO: The current approach only supports floating-point constants - // that can be constructed by conversion from integer values. This should - // be replaced by code that creates a load from a constant-pool entry, - // which will require some target-specific work. 
- const APFloat &Flt = FPImm->getValueAPF(); - EVT IntVT = TLI.getPointerTy(); - - uint64_t x[2]; - uint32_t IntBitWidth = IntVT.getSizeInBits(); - bool isExact; - (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true, - APFloat::rmTowardZero, &isExact); - if (!isExact) - return 0; - APInt IntVal(IntBitWidth, 2, x); - - unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(), - ISD::Constant, IntVal.getZExtValue()); - if (IntegerReg == 0) - return 0; - MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT, - ISD::SINT_TO_FP, IntegerReg); - if (MaterialReg == 0) + unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm); + if (!MaterialReg) { + // This is a bit ugly/slow, but failing here means falling out of + // fast-isel, which would be very slow. + IntegerType *ITy = + IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits()); + MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm)); + if (!MaterialReg) return 0; } - return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg); + return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, + /*IsKill=*/true); } -unsigned FastISel::createResultReg(const TargetRegisterClass* RC) { +unsigned FastISel::createResultReg(const TargetRegisterClass *RC) { return MRI.createVirtualRegister(RC); } -unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode, - const TargetRegisterClass* RC) { +unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op, + unsigned OpNum) { + if (TargetRegisterInfo::isVirtualRegister(Op)) { + const TargetRegisterClass *RegClass = + TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF); + if (!MRI.constrainRegClass(Op, RegClass)) { + // If it's not legal to COPY between the register classes, something + // has gone very wrong before we got here. 
+ unsigned NewOp = createResultReg(RegClass); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), NewOp).addReg(Op); + return NewOp; + } + } + return Op; +} + +unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode, + const TargetRegisterClass *RC) { unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + const MCInstrDesc &II = TII.get(MachineInstOpcode); - BuildMI(MBB, DL, II, ResultReg); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg); return ResultReg; } -unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0) { +unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, unsigned Op0, + bool Op0IsKill) { + const MCInstrDesc &II = TII.get(MachineInstOpcode); + unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addReg(Op0); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, getKillRegState(Op0IsKill)); else { - BuildMI(MBB, DL, II).addReg(Op0); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC); - if (!InsertedCopy) - ResultReg = 0; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(Op0, getKillRegState(Op0IsKill)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } return ResultReg; } -unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, unsigned Op1) { +unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, unsigned Op0, + bool Op0IsKill, unsigned Op1, + bool Op1IsKill) { + const MCInstrDesc &II = TII.get(MachineInstOpcode); + unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); + Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op1, getKillRegState(Op1IsKill)); else { - BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC); - if (!InsertedCopy) - ResultReg = 0; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op1, getKillRegState(Op1IsKill)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } return ResultReg; } -unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, uint64_t Imm) { +unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, unsigned Op0, + bool Op0IsKill, unsigned Op1, + bool Op1IsKill, unsigned Op2, + bool Op2IsKill) { + const MCInstrDesc &II = TII.get(MachineInstOpcode); + unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); + Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); + Op2 = 
constrainOperandRegClass(II, Op2, II.getNumDefs() + 2); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op1, getKillRegState(Op1IsKill)) + .addReg(Op2, getKillRegState(Op2IsKill)); else { - BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC); - if (!InsertedCopy) - ResultReg = 0; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op1, getKillRegState(Op1IsKill)) + .addReg(Op2, getKillRegState(Op2IsKill)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } return ResultReg; } -unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, const ConstantFP *FPImm) { +unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, unsigned Op0, + bool Op0IsKill, uint64_t Imm) { + const MCInstrDesc &II = TII.get(MachineInstOpcode); + unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addImm(Imm); else { - BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC); - if (!InsertedCopy) - ResultReg = 0; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addImm(Imm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } return ResultReg; } -unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, unsigned Op1, uint64_t Imm) { +unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, unsigned Op0, + bool Op0IsKill, uint64_t Imm1, + uint64_t Imm2) { + const MCInstrDesc &II = TII.get(MachineInstOpcode); + unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); + Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addImm(Imm1) + .addImm(Imm2); else { - BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC); - if (!InsertedCopy) - ResultReg = 0; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addImm(Imm1) + .addImm(Imm2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } return ResultReg; } -unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - uint64_t Imm) { +unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, unsigned Op0, + bool Op0IsKill, const ConstantFP *FPImm) { + const 
MCInstrDesc &II = TII.get(MachineInstOpcode); + unsigned ResultReg = createResultReg(RC); - const TargetInstrDesc &II = TII.get(MachineInstOpcode); - + Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); + if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addImm(Imm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addFPImm(FPImm); else { - BuildMI(MBB, DL, II).addImm(Imm); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC); - if (!InsertedCopy) - ResultReg = 0; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addFPImm(FPImm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } return ResultReg; } -unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT, - unsigned Op0, uint32_t Idx) { - const TargetRegisterClass* RC = MRI.getRegClass(Op0); - - unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); - const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG); - +unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, unsigned Op0, + bool Op0IsKill, unsigned Op1, + bool Op1IsKill, uint64_t Imm) { + const MCInstrDesc &II = TII.get(MachineInstOpcode); + + unsigned ResultReg = createResultReg(RC); + Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); + Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); + if (II.getNumDefs() >= 1) - BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op1, getKillRegState(Op1IsKill)) + .addImm(Imm); else { - BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx); - bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, - II.ImplicitDefs[0], RC, RC); - if (!InsertedCopy) - ResultReg = 0; + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op1, getKillRegState(Op1IsKill)) + .addImm(Imm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); } return ResultReg; } -/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op -/// with all but the least significant bit set to zero. 
-unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) { - return FastEmit_ri(VT, VT, ISD::AND, Op, 1); +unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, unsigned Op1, + bool Op1IsKill, uint64_t Imm1, + uint64_t Imm2) { + const MCInstrDesc &II = TII.get(MachineInstOpcode); + + unsigned ResultReg = createResultReg(RC); + Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); + Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); + + if (II.getNumDefs() >= 1) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op1, getKillRegState(Op1IsKill)) + .addImm(Imm1) + .addImm(Imm2); + else { + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) + .addReg(Op0, getKillRegState(Op0IsKill)) + .addReg(Op1, getKillRegState(Op1IsKill)) + .addImm(Imm1) + .addImm(Imm2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); + } + return ResultReg; +} + +unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, uint64_t Imm) { + unsigned ResultReg = createResultReg(RC); + const MCInstrDesc &II = TII.get(MachineInstOpcode); + + if (II.getNumDefs() >= 1) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addImm(Imm); + else { + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); + } + return ResultReg; +} + +unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, uint64_t Imm1, + uint64_t Imm2) { + unsigned ResultReg = createResultReg(RC); + const MCInstrDesc &II = TII.get(MachineInstOpcode); + + if (II.getNumDefs() >= 1) + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) + .addImm(Imm1) + .addImm(Imm2); + else { + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1) + .addImm(Imm2); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); + } + return ResultReg; +} + +unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, + bool Op0IsKill, uint32_t Idx) { + unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); + assert(TargetRegisterInfo::isVirtualRegister(Op0) && + "Cannot yet extract from physregs"); + const TargetRegisterClass *RC = MRI.getRegClass(Op0); + MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx)); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), + ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx); + return ResultReg; +} + +/// Emit MachineInstrs to compute the value of Op with all but the least +/// significant bit set to zero. +unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) { + return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1); } /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks. @@ -1029,22 +1960,24 @@ unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) { /// nodes as input. We cannot just directly add them, because expansion /// might result in multiple MBB's for one BB. As such, the start of the /// BB might correspond to a different MBB than the end. 
-bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { +bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { const TerminatorInst *TI = LLVMBB->getTerminator(); SmallPtrSet SuccsHandled; - unsigned OrigNumPHINodesToUpdate = PHINodesToUpdate.size(); + FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size(); // Check successor nodes' PHI nodes that expect a constant to be available // from this block. for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { const BasicBlock *SuccBB = TI->getSuccessor(succ); - if (!isa(SuccBB->begin())) continue; - MachineBasicBlock *SuccMBB = MBBMap[SuccBB]; + if (!isa(SuccBB->begin())) + continue; + MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; // If this terminator has multiple identical successors (common for // switches), only handle each succ once. - if (!SuccsHandled.insert(SuccMBB)) continue; + if (!SuccsHandled.insert(SuccMBB)) + continue; MachineBasicBlock::iterator MBBI = SuccMBB->begin(); @@ -1052,37 +1985,203 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { // nodes and Machine PHI nodes, but the incoming operands have not been // emitted yet. for (BasicBlock::const_iterator I = SuccBB->begin(); - const PHINode *PN = dyn_cast(I); ++I) { + const auto *PN = dyn_cast(I); ++I) { + // Ignore dead phi's. - if (PN->use_empty()) continue; + if (PN->use_empty()) + continue; // Only handle legal types. Two interesting things to note here. First, // by bailing out early, we may leave behind some dead instructions, // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its - // own moves. Second, this check is necessary becuase FastISel doesn't - // use CreateRegForValue to create registers, so it always creates + // own moves. Second, this check is necessary because FastISel doesn't + // use CreateRegs to create registers, so it always creates // exactly one register for each non-void instruction. EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true); if (VT == MVT::Other || !TLI.isTypeLegal(VT)) { - // Promote MVT::i1. - if (VT == MVT::i1) - VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT); - else { - PHINodesToUpdate.resize(OrigNumPHINodesToUpdate); + // Handle integer promotions, though, because they're common and easy. + if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) { + FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); return false; } } const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB); + // Set the DebugLoc for the copy. Prefer the location of the operand + // if there is one; use the location of the PHI otherwise. + DbgLoc = PN->getDebugLoc(); + if (const auto *Inst = dyn_cast(PHIOp)) + DbgLoc = Inst->getDebugLoc(); + unsigned Reg = getRegForValue(PHIOp); - if (Reg == 0) { - PHINodesToUpdate.resize(OrigNumPHINodesToUpdate); + if (!Reg) { + FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); return false; } - PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg)); + FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg)); + DbgLoc = DebugLoc(); } } return true; } + +bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) { + assert(LI->hasOneUse() && + "tryToFoldLoad expected a LoadInst with a single use"); + // We know that the load has a single use, but don't know what it is. If it + // isn't one of the folded instructions, then we can't succeed here. 
Handle + // this by scanning the single-use users of the load until we get to FoldInst. + unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs. + + const Instruction *TheUser = LI->user_back(); + while (TheUser != FoldInst && // Scan up until we find FoldInst. + // Stay in the right block. + TheUser->getParent() == FoldInst->getParent() && + --MaxUsers) { // Don't scan too far. + // If there are multiple or no uses of this instruction, then bail out. + if (!TheUser->hasOneUse()) + return false; + + TheUser = TheUser->user_back(); + } + + // If we didn't find the fold instruction, then we failed to collapse the + // sequence. + if (TheUser != FoldInst) + return false; + + // Don't try to fold volatile loads. Target has to deal with alignment + // constraints. + if (LI->isVolatile()) + return false; + + // Figure out which vreg this is going into. If there is no assigned vreg yet + // then there actually was no reference to it. Perhaps the load is referenced + // by a dead instruction. + unsigned LoadReg = getRegForValue(LI); + if (!LoadReg) + return false; + + // We can't fold if this vreg has no uses or more than one use. Multiple uses + // may mean that the instruction got lowered to multiple MIs, or the use of + // the loaded value ended up being multiple operands of the result. + if (!MRI.hasOneUse(LoadReg)) + return false; + + MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg); + MachineInstr *User = RI->getParent(); + + // Set the insertion point properly. Folding the load can cause generation of + // other random instructions (like sign extends) for addressing modes; make + // sure they get inserted in a logical place before the new instruction. + FuncInfo.InsertPt = User; + FuncInfo.MBB = User->getParent(); + + // Ask the target to try folding the load. + return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI); +} + +bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) { + // Must be an add. + if (!isa(Add)) + return false; + // Type size needs to match. + if (DL.getTypeSizeInBits(GEP->getType()) != + DL.getTypeSizeInBits(Add->getType())) + return false; + // Must be in the same basic block. + if (isa(Add) && + FuncInfo.MBBMap[cast(Add)->getParent()] != FuncInfo.MBB) + return false; + // Must have a constant operand. + return isa(cast(Add)->getOperand(1)); +} + +MachineMemOperand * +FastISel::createMachineMemOperandFor(const Instruction *I) const { + const Value *Ptr; + Type *ValTy; + unsigned Alignment; + unsigned Flags; + bool IsVolatile; + + if (const auto *LI = dyn_cast(I)) { + Alignment = LI->getAlignment(); + IsVolatile = LI->isVolatile(); + Flags = MachineMemOperand::MOLoad; + Ptr = LI->getPointerOperand(); + ValTy = LI->getType(); + } else if (const auto *SI = dyn_cast(I)) { + Alignment = SI->getAlignment(); + IsVolatile = SI->isVolatile(); + Flags = MachineMemOperand::MOStore; + Ptr = SI->getPointerOperand(); + ValTy = SI->getValueOperand()->getType(); + } else + return nullptr; + + bool IsNonTemporal = I->getMetadata("nontemporal") != nullptr; + bool IsInvariant = I->getMetadata("invariant.load") != nullptr; + const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range); + + AAMDNodes AAInfo; + I->getAAMetadata(AAInfo); + + if (Alignment == 0) // Ensure that codegen never sees alignment 0. 
+ Alignment = DL.getABITypeAlignment(ValTy); + + unsigned Size = + TM.getSubtargetImpl()->getDataLayout()->getTypeStoreSize(ValTy); + + if (IsVolatile) + Flags |= MachineMemOperand::MOVolatile; + if (IsNonTemporal) + Flags |= MachineMemOperand::MONonTemporal; + if (IsInvariant) + Flags |= MachineMemOperand::MOInvariant; + + return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size, + Alignment, AAInfo, Ranges); +} + +CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const { + // If both operands are the same, then try to optimize or fold the cmp. + CmpInst::Predicate Predicate = CI->getPredicate(); + if (CI->getOperand(0) != CI->getOperand(1)) + return Predicate; + + switch (Predicate) { + default: llvm_unreachable("Invalid predicate!"); + case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break; + case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break; + case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break; + case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break; + case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break; + case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break; + case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break; + case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break; + case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break; + case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break; + case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break; + case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break; + + case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break; + case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break; + case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break; + case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break; + case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break; + case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break; + } + + return Predicate; +}
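
For readers following the selectFNeg fallback in this patch (bitcast the value to an integer, XOR the sign bit, bitcast back), the standalone C++ sketch below shows the same trick on a host double. It is illustrative only and not part of FastISel: the helper name fnegViaSignBitXor is invented for this example, and it assumes a 64-bit IEEE-754 double, mirroring the UINT64_C(1) << (VT.getSizeInBits() - 1) mask used in the patch.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Negate a double by flipping bit 63 of its bit pattern, mirroring the
// bitcast/XOR/bitcast sequence selectFNeg emits when ISD::FNEG is unavailable.
static double fnegViaSignBitXor(double X) {
  static_assert(sizeof(double) == sizeof(uint64_t), "assumes 64-bit double");
  uint64_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));   // "bitcast" f64 -> i64
  Bits ^= UINT64_C(1) << 63;              // flip the IEEE-754 sign bit
  std::memcpy(&X, &Bits, sizeof(Bits));   // "bitcast" i64 -> f64
  return X;
}

int main() {
  assert(fnegViaSignBitXor(1.5) == -1.5);
  assert(!std::signbit(fnegViaSignBitXor(-0.0))); // -0.0 becomes +0.0
  assert(std::isnan(fnegViaSignBitXor(NAN)));     // NaN stays NaN, sign flips
  return 0;
}

Because the XOR only touches the sign bit, significand bits are left alone, so NaN payloads and signed zeros survive unchanged apart from their sign.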
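
The new fastEmit_ri_ body also rewrites a multiply or unsigned divide by a power of two into a shift before trying the ri form. Below is a minimal sketch of that strength reduction with made-up helper names, using plain bit operations in place of llvm::isPowerOf2_64 and llvm::Log2_64; it is an illustration of the idea, not the patch's code.

#include <cassert>
#include <cstdint>

// True if Imm is a non-zero power of two.
static bool isPow2(uint64_t Imm) { return Imm && (Imm & (Imm - 1)) == 0; }

// Floor log2, exact for power-of-two inputs.
static unsigned log2Pow2(uint64_t Imm) {
  unsigned Shift = 0;
  while (Imm >>= 1)
    ++Shift;
  return Shift;
}

int main() {
  uint64_t X = 12345;
  // mul x, 8 -> shl x, 3
  assert(isPow2(8) && X * 8 == X << log2Pow2(8));
  // udiv x, 8 -> srl x, 3 (valid for unsigned division only)
  assert(X / 8 == X >> log2Pow2(8));
  return 0;
}

This is also why the patch follows the rewrite with a range check on shift amounts (Imm >= VT.getSizeInBits() bails out): an out-of-range shift count is not a meaningful instruction for the target.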
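
Finally, optimizeCmpPredicate at the end of the patch folds comparisons whose two operands are identical; for instance fcmp oeq x, x is true exactly when x is not NaN, so it degenerates to an "ordered" check. A small standalone check of that one row of the table, again illustrative rather than LLVM code:

#include <cassert>
#include <cmath>

// For identical operands, "oeq x, x" holds exactly when x is ordered
// (not NaN), which is what folding FCMP_OEQ to FCMP_ORD relies on.
static bool oeqSelf(double X) { return X == X; }           // ordered-equal
static bool ordSelf(double X) { return !std::isnan(X); }   // ordered

int main() {
  const double Vals[] = {0.0, -1.5, 3.25, NAN};
  for (double V : Vals)
    assert(oeqSelf(V) == ordSelf(V));
  return 0;
}

The integer rows collapse the same way: eq, uge, ule, sge, and sle on identical operands are always true, while the strict orderings are always false, which is why they fold to the constant FCMP_TRUE and FCMP_FALSE predicates.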