//
// This file contains the implementation of the FastISel class.
//
+// "Fast" instruction selection is designed to emit very poor code quickly.
+// Also, it is not designed to be able to do much lowering, so most illegal
+// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
+// also not intended to be able to do much optimization, except in a few cases
+// where doing optimizations reduces overall compile time. For example, folding
+// constants into immediate fields is often done, because it's cheap and it
+// reduces the number of instructions later phases have to examine.
+//
+// "Fast" instruction selection is able to fail gracefully and transfer
+// control to the SelectionDAG selector for operations that it doesn't
+// support. In many cases, this allows us to avoid duplicating a lot of
+// the complicated lowering logic that SelectionDAG currently has.
+//
+// The intended use for "fast" instruction selection is "-O0" mode
+// compilation, where the quality of the generated code is irrelevant when
+// weighed against the speed at which the code can be generated. Also,
+// at -O0, the LLVM optimizers are not running, and this makes the
+// compile time of codegen a much higher portion of the overall compile
+// time. Despite its limitations, "fast" instruction selection is able to
+// handle enough code on its own to provide noticeable overall speedups
+// in -O0 compiles.
+//
+// Basic operations are supported in a target-independent way, by reading
+// the same instruction descriptions that the SelectionDAG selector reads,
+// and identifying simple arithmetic operations that can be directly selected
+// from simple operators. More complicated operations currently require
+// target-specific code.
+//
//===----------------------------------------------------------------------===//
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
+#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/DwarfWriter.h"
+#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "SelectionDAGBuild.h"
using namespace llvm;
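+/// getRegForValue - Return the register for the given value, materializing
+/// it into a new virtual register if necessary. Returns zero if the value
+/// is of a type that "fast" selection cannot handle.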
unsigned FastISel::getRegForValue(Value *V) {
+ MVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
+ // Don't handle non-simple values in FastISel.
+ if (!RealVT.isSimple())
+ return 0;
+
+ // Ignore illegal types. We must do this before looking up the value
+ // in ValueMap because Arguments are given virtual registers regardless
+ // of whether FastISel can handle them.
+ MVT::SimpleValueType VT = RealVT.getSimpleVT();
+ if (!TLI.isTypeLegal(VT)) {
+ // Promote MVT::i1 to a legal type though, because it's common and easy.
+ if (VT == MVT::i1)
+ VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
+ else
+ return 0;
+ }
+
// Look up the value to see if we already have a register for it. We
// cache values defined by Instructions across blocks, and other values
// only locally. This is because Instructions already have the SSA
// def-dominates-use requirement enforced.
if (ValueMap.count(V))
  return ValueMap[V];
unsigned Reg = LocalValueMap[V];
if (Reg != 0)
return Reg;
- MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT();
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- if (CI->getValue().getActiveBits() > 64)
- return TargetMaterializeConstant(CI,
- MBB->getParent()->getConstantPool());
- // Don't cache constant materializations. To do so would require
- // tracking what uses they dominate.
- Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ if (CI->getValue().getActiveBits() <= 64)
+ Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ } else if (isa<AllocaInst>(V)) {
+ Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
} else if (isa<ConstantPointerNull>(V)) {
- Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
+ // Translate this as an integer zero so that it can be
+ // local-CSE'd with actual integer zeros.
+ Reg = getRegForValue(V->getContext().getNullValue(TD.getIntPtrType()));
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
if (!Reg) {
  // Try materializing the constant by converting it to an integer and
  // emitting a SINT_TO_FP from that.
  const APFloat &Flt = CF->getValueAPF();
  MVT IntVT = TLI.getPointerTy();
  uint64_t x[2];
  uint32_t IntBitWidth = IntVT.getSizeInBits();
- if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
- APFloat::rmTowardZero) != APFloat::opOK)
- return TargetMaterializeConstant(CF,
- MBB->getParent()->getConstantPool());
- APInt IntVal(IntBitWidth, 2, x);
-
- unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
- ISD::Constant, IntVal.getZExtValue());
- if (IntegerReg == 0)
- return TargetMaterializeConstant(CF,
- MBB->getParent()->getConstantPool());
- Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
- if (Reg == 0)
- return TargetMaterializeConstant(CF,
- MBB->getParent()->getConstantPool());;
+ bool isExact;
+ (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
+ APFloat::rmTowardZero, &isExact);
+ if (isExact) {
+ APInt IntVal(IntBitWidth, 2, x);
+
+ unsigned IntegerReg =
+ getRegForValue(ConstantInt::get(V->getContext(), IntVal));
+ if (IntegerReg != 0)
+ Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
+ }
}
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (!SelectOperator(CE, CE->getOpcode())) return 0;
Reg = LocalValueMap[CE];
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
- } else {
- return 0;
+ BuildMI(MBB, DL, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
}
+ // If target-independent code couldn't handle the value, give target-specific
+ // code a try.
if (!Reg && isa<Constant>(V))
- return TargetMaterializeConstant(cast<Constant>(V),
- MBB->getParent()->getConstantPool());
+ Reg = TargetMaterializeConstant(cast<Constant>(V));
- LocalValueMap[V] = Reg;
+ // Don't cache constant materializations in the general ValueMap.
+ // To do so would require tracking what uses they dominate.
+ if (Reg != 0)
+ LocalValueMap[V] = Reg;
return Reg;
}
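+/// lookUpRegForValue - Return the register already assigned to the given
+/// value, or zero if no register has been assigned yet.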
+unsigned FastISel::lookUpRegForValue(Value *V) {
+ // Look up the value to see if we already have a register for it. We
+ // cache values defined by Instructions across blocks, and other values
+ // only locally. This is because Instructions already have the SSA
+ // def-dominates-use requirement enforced.
+ if (ValueMap.count(V))
+ return ValueMap[V];
+ return LocalValueMap[V];
+}
+
/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
-void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
+unsigned FastISel::UpdateValueMap(Value* I, unsigned Reg) {
if (!isa<Instruction>(I)) {
LocalValueMap[I] = Reg;
- return;
+ return Reg;
}
- if (!ValueMap.count(I))
- ValueMap[I] = Reg;
- else
- TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
- Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
+
+ unsigned &AssignedReg = ValueMap[I];
+ if (AssignedReg == 0)
+ AssignedReg = Reg;
+ else if (Reg != AssignedReg) {
+ const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
+ TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
+ Reg, RegClass, RegClass);
+ }
+ return AssignedReg;
+}
+
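+/// getRegForGEPIndex - This is a wrapper around getRegForValue that also
+/// takes care of truncating or sign-extending the given getelementptr
+/// index value to the pointer type.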
+unsigned FastISel::getRegForGEPIndex(Value *Idx) {
+ unsigned IdxN = getRegForValue(Idx);
+ if (IdxN == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return 0;
+
+ // If the index is smaller or larger than intptr_t, truncate or extend it.
+ MVT PtrVT = TLI.getPointerTy();
+ MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
+ if (IdxVT.bitsLT(PtrVT))
+ IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT.getSimpleVT(),
+ ISD::SIGN_EXTEND, IdxN);
+ else if (IdxVT.bitsGT(PtrVT))
+ IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT.getSimpleVT(),
+ ISD::TRUNCATE, IdxN);
+ return IdxN;
}
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
  MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
if (!TLI.isTypeLegal(VT)) {
- // MVT::i1 is special. Allow AND and OR (but not XOR) because they
+ // MVT::i1 is special. Allow AND, OR, or XOR because they
// don't require additional zeroing, which makes them easy.
if (VT == MVT::i1 &&
- (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR))
+ (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
+ ISDOpcode == ISD::XOR))
VT = TLI.getTypeToTransformTo(VT);
else
return false;
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
uint64_t Offs =
- TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+ TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
if (N == 0)
  // Unhandled operand. Halt "fast" selection and bail.
  return false;
continue;
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD.getABITypeSize(Ty);
- unsigned IdxN = getRegForValue(Idx);
- if (IdxN == 0)
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
-
- // If the index is smaller or larger than intptr_t, truncate or extend
- // it.
- MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
- if (IdxVT.bitsLT(VT))
- IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
- else if (IdxVT.bitsGT(VT))
- IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
+ uint64_t ElementSize = TD.getTypeAllocSize(Ty);
+ unsigned IdxN = getRegForGEPIndex(Idx);
if (IdxN == 0)
// Unhandled operand. Halt "fast" selection and bail.
return false;
return true;
}
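+/// SelectCall - Select and emit code for a call instruction. Only direct
+/// calls to the intrinsics handled below are selected here; everything
+/// else is left for the SelectionDAG path by returning false.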
+bool FastISel::SelectCall(User *I) {
+ Function *F = cast<CallInst>(I)->getCalledFunction();
+ if (!F) return false;
+
+ unsigned IID = F->getIntrinsicID();
+ switch (IID) {
+ default: break;
+ case Intrinsic::dbg_stoppoint: {
+ DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
+ if (isValidDebugInfoIntrinsic(*SPI, CodeGenOpt::None))
+ setCurDebugLoc(ExtractDebugLocation(*SPI, MF.getDebugLocInfo()));
+ return true;
+ }
+ case Intrinsic::dbg_region_start: {
+ DbgRegionStartInst *RSI = cast<DbgRegionStartInst>(I);
+ if (isValidDebugInfoIntrinsic(*RSI, CodeGenOpt::None) && DW
+ && DW->ShouldEmitDwarfDebug()) {
+ unsigned ID =
+ DW->RecordRegionStart(cast<GlobalVariable>(RSI->getContext()));
+ const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
+ BuildMI(MBB, DL, II).addImm(ID);
+ }
+ return true;
+ }
+ case Intrinsic::dbg_region_end: {
+ DbgRegionEndInst *REI = cast<DbgRegionEndInst>(I);
+ if (isValidDebugInfoIntrinsic(*REI, CodeGenOpt::None) && DW
+ && DW->ShouldEmitDwarfDebug()) {
+ unsigned ID = 0;
+ DISubprogram Subprogram(cast<GlobalVariable>(REI->getContext()));
+ if (isInlinedFnEnd(*REI, MF.getFunction())) {
+ // This is the end of an inlined function.
+ const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
+ ID = DW->RecordInlinedFnEnd(Subprogram);
+ if (ID)
+ // The returned ID is 0 if this is an unbalanced "end of inlined
+ // scope". This could happen if the optimizer eats dbg intrinsics or
+ // if "beginning of inlined scope" is not recognized due to
+ // missing location info. In such cases, ignore this region.end.
+ BuildMI(MBB, DL, II).addImm(ID);
+ } else {
+ const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
+ ID = DW->RecordRegionEnd(cast<GlobalVariable>(REI->getContext()));
+ BuildMI(MBB, DL, II).addImm(ID);
+ }
+ }
+ return true;
+ }
+ case Intrinsic::dbg_func_start: {
+ DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
+ if (!isValidDebugInfoIntrinsic(*FSI, CodeGenOpt::None) || !DW
+ || !DW->ShouldEmitDwarfDebug())
+ return true;
+
+ if (isInlinedFnStart(*FSI, MF.getFunction())) {
+ // This is the beginning of an inlined function.
+
+ // If llvm.dbg.func.start is seen in a new block before any
+ // llvm.dbg.stoppoint intrinsic then the location info is unknown.
+ // FIXME: Why is DebugLoc reset at the beginning of each block?
+ DebugLoc PrevLoc = DL;
+ if (PrevLoc.isUnknown())
+ return true;
+ // Record the source line.
+ setCurDebugLoc(ExtractDebugLocation(*FSI, MF.getDebugLocInfo()));
+
+ DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
+ DISubprogram SP(cast<GlobalVariable>(FSI->getSubprogram()));
+ unsigned LabelID = DW->RecordInlinedFnStart(SP,
+ DICompileUnit(PrevLocTpl.CompileUnit),
+ PrevLocTpl.Line,
+ PrevLocTpl.Col);
+ const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
+ BuildMI(MBB, DL, II).addImm(LabelID);
+ return true;
+ }
+
+ // This is the beginning of a new function.
+ MF.setDefaultDebugLoc(ExtractDebugLocation(*FSI, MF.getDebugLocInfo()));
+
+ // llvm.dbg.func_start also defines the beginning of the function scope.
+ DW->RecordRegionStart(cast<GlobalVariable>(FSI->getSubprogram()));
+ return true;
+ }
+ case Intrinsic::dbg_declare: {
+ DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
+ if (!isValidDebugInfoIntrinsic(*DI, CodeGenOpt::None) || !DW
+ || !DW->ShouldEmitDwarfDebug())
+ return true;
+
+ Value *Variable = DI->getVariable();
+ Value *Address = DI->getAddress();
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
+ Address = BCI->getOperand(0);
+ AllocaInst *AI = dyn_cast<AllocaInst>(Address);
+ // Don't handle byval struct arguments or VLAs, for example.
+ if (!AI) break;
+ DenseMap<const AllocaInst*, int>::iterator SI =
+ StaticAllocaMap.find(AI);
+ if (SI == StaticAllocaMap.end()) break; // VLAs.
+ int FI = SI->second;
+
+ // Determine the debug global variable.
+ GlobalValue *GV = cast<GlobalVariable>(Variable);
+
+ // Build the DECLARE instruction.
+ const TargetInstrDesc &II = TII.get(TargetInstrInfo::DECLARE);
+ MachineInstr *DeclareMI
+ = BuildMI(MBB, DL, II).addFrameIndex(FI).addGlobalAddress(GV);
+ DIVariable DV(cast<GlobalVariable>(GV));
+ DW->RecordVariableScope(DV, DeclareMI);
+ return true;
+ }
+ case Intrinsic::eh_exception: {
+ MVT VT = TLI.getValueType(I->getType());
+ switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
+ default: break;
+ case TargetLowering::Expand: {
+ assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
+ unsigned Reg = TLI.getExceptionAddressRegister();
+ const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
+ unsigned ResultReg = createResultReg(RC);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ Reg, RC, RC);
+ assert(InsertedCopy && "Can't copy address registers!");
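+ // The self-assignment silences unused-variable warnings when asserts
+ // are disabled.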
+ InsertedCopy = InsertedCopy;
+ UpdateValueMap(I, ResultReg);
+ return true;
+ }
+ }
+ break;
+ }
+ case Intrinsic::eh_selector_i32:
+ case Intrinsic::eh_selector_i64: {
+ MVT VT = TLI.getValueType(I->getType());
+ switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
+ default: break;
+ case TargetLowering::Expand: {
+ MVT VT = (IID == Intrinsic::eh_selector_i32 ?
+ MVT::i32 : MVT::i64);
+
+ if (MMI) {
+ if (MBB->isLandingPad())
+ AddCatchInfo(*cast<CallInst>(I), MMI, MBB);
+ else {
+#ifndef NDEBUG
+ CatchInfoLost.insert(cast<CallInst>(I));
+#endif
+ // FIXME: Mark exception selector register as live in. Hack for PR1508.
+ unsigned Reg = TLI.getExceptionSelectorRegister();
+ if (Reg) MBB->addLiveIn(Reg);
+ }
+
+ unsigned Reg = TLI.getExceptionSelectorRegister();
+ const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
+ unsigned ResultReg = createResultReg(RC);
+ bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
+ Reg, RC, RC);
+ assert(InsertedCopy && "Can't copy address registers!");
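+ // The self-assignment silences unused-variable warnings when asserts
+ // are disabled.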
+ InsertedCopy = InsertedCopy;
+ UpdateValueMap(I, ResultReg);
+ } else {
+ unsigned ResultReg =
+ getRegForValue(I->getContext().getNullValue(I->getType()));
+ UpdateValueMap(I, ResultReg);
+ }
+ return true;
+ }
+ }
+ break;
+ }
+ }
+ return false;
+}
+
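+/// SelectCast - Select and emit code for a cast instruction, using the
+/// given target-independent ISD opcode for the operation.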
bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
MVT DstVT = TLI.getValueType(I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
- DstVT == MVT::Other || !DstVT.isSimple() ||
- !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
+ DstVT == MVT::Other || !DstVT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
+ // Check if the destination type is legal. Or as a special case,
+ // it may be i1 if we're doing a truncate because that's
+ // easy and somewhat common.
+ if (!TLI.isTypeLegal(DstVT))
+ if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
+ // Unhandled type. Halt "fast" selection and bail.
+ return false;
+
+ // Check if the source operand is legal. Or as a special case,
+ // it may be i1 if we're doing zero-extension because that's
+ // easy and somewhat common.
+ if (!TLI.isTypeLegal(SrcVT))
+ if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
+ // Unhandled type. Halt "fast" selection and bail.
+ return false;
+
unsigned InputReg = getRegForValue(I->getOperand(0));
if (!InputReg)
// Unhandled operand. Halt "fast" selection and bail.
return false;
-
+
+ // If the operand is i1, arrange for the high bits in the register to be zero.
+ if (SrcVT == MVT::i1) {
+ SrcVT = TLI.getTypeToTransformTo(SrcVT);
+ InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
+ if (!InputReg)
+ return false;
+ }
+ // If the result is i1, truncate to the target's type for i1 first.
+ if (DstVT == MVT::i1)
+ DstVT = TLI.getTypeToTransformTo(DstVT);
+
unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
DstVT.getSimpleVT(),
Opcode,
return SelectOperator(I, I->getOpcode());
}
+/// FastEmitBranch - Emit an unconditional branch to the given block,
+/// unless it is the immediate (fall-through) successor, and update
+/// the CFG.
+void
+FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
+ if (MBB->isLayoutSuccessor(MSucc)) {
+ // The unconditional fall-through case, which needs no instructions.
+ } else {
+ // The unconditional branch case.
+ TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
+ }
+ MBB->addSuccessor(MSucc);
+}
+
bool
FastISel::SelectOperator(User *I, unsigned Opcode) {
switch (Opcode) {
- case Instruction::Add: {
- ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
- return SelectBinaryOp(I, Opc);
- }
- case Instruction::Sub: {
- ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
- return SelectBinaryOp(I, Opc);
- }
- case Instruction::Mul: {
- ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
- return SelectBinaryOp(I, Opc);
- }
+ case Instruction::Add:
+ return SelectBinaryOp(I, ISD::ADD);
+ case Instruction::FAdd:
+ return SelectBinaryOp(I, ISD::FADD);
+ case Instruction::Sub:
+ return SelectBinaryOp(I, ISD::SUB);
+ case Instruction::FSub:
+ return SelectBinaryOp(I, ISD::FSUB);
+ case Instruction::Mul:
+ return SelectBinaryOp(I, ISD::MUL);
+ case Instruction::FMul:
+ return SelectBinaryOp(I, ISD::FMUL);
case Instruction::SDiv:
return SelectBinaryOp(I, ISD::SDIV);
case Instruction::UDiv:
BranchInst *BI = cast<BranchInst>(I);
if (BI->isUnconditional()) {
- MachineFunction::iterator NextMBB =
- next(MachineFunction::iterator(MBB));
BasicBlock *LLVMSucc = BI->getSuccessor(0);
MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
-
- if (NextMBB != MF.end() && MSucc == NextMBB) {
- // The unconditional fall-through case, which needs no instructions.
- } else {
- // The unconditional branch case.
- TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
- }
- MBB->addSuccessor(MSucc);
+ FastEmitBranch(MSucc);
return true;
}
case Instruction::PHI:
// PHI nodes are already emitted.
return true;
+
+ case Instruction::Alloca:
+ // FunctionLowering has the static-sized case covered.
+ if (StaticAllocaMap.count(cast<AllocaInst>(I)))
+ return true;
+
+ // Dynamic-sized alloca is not handled yet.
+ return false;
+ case Instruction::Call:
+ return SelectCall(I);
+
case Instruction::BitCast:
return SelectBitCast(I);
UpdateValueMap(I, Reg);
return true;
}
-
+
default:
// Unhandled instruction. Halt "fast" selection and bail.
return false;
}
FastISel::FastISel(MachineFunction &mf,
+ MachineModuleInfo *mmi,
+ DwarfWriter *dw,
DenseMap<const Value *, unsigned> &vm,
- DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
+ DenseMap<const AllocaInst *, int> &am
+#ifndef NDEBUG
+ , SmallSet<Instruction*, 8> &cil
+#endif
+ )
: MBB(0),
ValueMap(vm),
MBBMap(bm),
+ StaticAllocaMap(am),
+#ifndef NDEBUG
+ CatchInfoLost(cil),
+#endif
MF(mf),
+ MMI(mmi),
+ DW(dw),
MRI(MF.getRegInfo()),
+ MFI(*MF.getFrameInfo()),
+ MCP(*MF.getConstantPool()),
TM(MF.getTarget()),
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
uint64_t x[2];
uint32_t IntBitWidth = IntVT.getSizeInBits();
- if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
- APFloat::rmTowardZero) != APFloat::opOK)
+ bool isExact;
+ (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
+ APFloat::rmTowardZero, &isExact);
+ if (!isExact)
return 0;
APInt IntVal(IntBitWidth, 2, x);
unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
- BuildMI(MBB, II, ResultReg);
+ BuildMI(MBB, DL, II, ResultReg);
return ResultReg;
}
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, II, ResultReg).addReg(Op0);
+ BuildMI(MBB, DL, II, ResultReg).addReg(Op0);
else {
- BuildMI(MBB, II).addReg(Op0);
+ BuildMI(MBB, DL, II).addReg(Op0);
bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
II.ImplicitDefs[0], RC, RC);
if (!InsertedCopy)
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
+ BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1);
else {
- BuildMI(MBB, II).addReg(Op0).addReg(Op1);
+ BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1);
bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
II.ImplicitDefs[0], RC, RC);
if (!InsertedCopy)
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
+ BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm);
else {
- BuildMI(MBB, II).addReg(Op0).addImm(Imm);
+ BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm);
bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
II.ImplicitDefs[0], RC, RC);
if (!InsertedCopy)
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
+ BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm);
else {
- BuildMI(MBB, II).addReg(Op0).addFPImm(FPImm);
+ BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm);
bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
II.ImplicitDefs[0], RC, RC);
if (!InsertedCopy)
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
+ BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
else {
- BuildMI(MBB, II).addReg(Op0).addReg(Op1).addImm(Imm);
+ BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm);
bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
II.ImplicitDefs[0], RC, RC);
if (!InsertedCopy)
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, II, ResultReg).addImm(Imm);
+ BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
else {
- BuildMI(MBB, II).addImm(Imm);
+ BuildMI(MBB, DL, II).addImm(Imm);
bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
II.ImplicitDefs[0], RC, RC);
if (!InsertedCopy)
return ResultReg;
}
-unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
+unsigned FastISel::FastEmitInst_extractsubreg(MVT::SimpleValueType RetVT,
+ unsigned Op0, uint32_t Idx) {
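+ // Pick the result register class from the requested return type rather
+ // than from Op0's subregister class list.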
const TargetRegisterClass* RC = MRI.getRegClass(Op0);
- const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);
- unsigned ResultReg = createResultReg(SRC);
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);
if (II.getNumDefs() >= 1)
- BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
+ BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx);
else {
- BuildMI(MBB, II).addReg(Op0).addImm(Idx);
+ BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx);
bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
II.ImplicitDefs[0], RC, RC);
if (!InsertedCopy)
}
return ResultReg;
}
+
+/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
+/// with all but the least significant bit set to zero.
+unsigned FastISel::FastEmitZExtFromI1(MVT::SimpleValueType VT, unsigned Op) {
+ return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
+}