//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
          "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
          "target-specific selector");
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  EmitStartPt = 0;

  // Advance the emit start point past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    EmitStartPt = I;
    ++I;
  }
  LastLocalValue = EmitStartPt;
}

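/// flushLocalValueMap - Clear the map of locally-materialized values and
/// reset the local value area to the start of the block, so values
/// materialized afterward are emitted at the current insertion point.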
void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}

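/// hasTrivialKill - Return true if the register produced for V dies at its
/// sole use, i.e. V is an instruction with a single use in the same basic
/// block that fast-isel will not coalesce away as a no-op cast or
/// zero-index GEP.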
bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

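/// lookUpRegForValue - Look up the register already assigned to V, if any,
/// without trying to materialize V into a register.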
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}

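/// getRegForGEPIndex - Get the register for the given GEP index value,
/// sign-extending or truncating it to pointer width as needed. Returns the
/// register and whether it has a trivial kill.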
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

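/// recomputeInsertPt - Reset the current insertion point to just after the
/// last emitted local value, or to the start of the block (past any PHIs and
/// EH_LABELs) if no local values have been emitted yet.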
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

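/// enterLocalValueArea - Prepare to insert instructions into the local value
/// area, saving the current insertion point and debug location so that
/// leaveLocalValueArea can restore them.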
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

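/// leaveLocalValueArea - Record the last local value emitted and restore the
/// insertion point and debug location saved by enterLocalValueArea.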
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

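/// SelectGetElementPtr - Select and emit code for a getelementptr
/// instruction, lowering it to a sequence of adds and multiplies on the
/// pointer value while coalescing constant offsets.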
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += TD.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        // N = N + Offset
        TotalOffs +=
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (N == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

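/// SelectCall - Select and emit code for a call instruction. Only simple
/// inline asms and a handful of intrinsics are handled here; everything else
/// is left to the target or the SelectionDAG selector.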
bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return false;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return false;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
      if (Offset)
        Reg = TRI.getFrameRegister(*FuncInfo.MF);
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(Call);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);

    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }

  // Usually it does not make sense to initialize a value, make an unrelated
  // function call, and then use the value, because the value tends to be
  // spilled on the stack across the call. So we move the pointer to the last
  // local value back to the beginning of the block, so that all values that
  // have already been materialized appear after the call. It also makes
  // sense to skip intrinsics, since they tend to be inlined.
  if (!isa<IntrinsicInst>(F))
    flushLocalValueMap();

  // An arbitrary call. Bail.
  return false;
}

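/// SelectCast - Select and emit code for a cast instruction whose LLVM
/// opcode maps directly to the given ISD opcode.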
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

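/// SelectBitCast - Select and emit code for a bitcast instruction, preferring
/// a plain register-to-register copy when both types map to the same
/// register class.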
bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

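/// SelectInstruction - Do "fast" instruction selection for the given LLVM IR
/// instruction, trying the target-independent selector first and then the
/// target-specific one. Returns true on success.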
bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    ++NumFastIselSuccessIndependent;
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

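/// SelectOperator - Do "fast" instruction selection for the given LLVM IR
/// operator by dispatching on its opcode to the specialized Select*
/// routines. Returning false hands the operator off to the SelectionDAG
/// selector.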
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

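// Default (failing) implementations of the FastEmit_* hooks follow. The
// target-specific selector subclasses override the forms they can handle;
// a return value of 0 means no machine opcode is available for the given
// ISD opcode and type combination.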
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

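/// createResultReg - Allocate a fresh virtual register of the given register
/// class to hold an instruction's result.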
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    // The instruction defines its result only implicitly (e.g. in a fixed
    // physical register), so emit it and copy the implicit def to ResultReg.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}