//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
42 #define DEBUG_TYPE "isel"
43 #include "llvm/Function.h"
44 #include "llvm/GlobalVariable.h"
45 #include "llvm/Instructions.h"
46 #include "llvm/IntrinsicInst.h"
47 #include "llvm/Operator.h"
48 #include "llvm/CodeGen/Analysis.h"
49 #include "llvm/CodeGen/FastISel.h"
50 #include "llvm/CodeGen/FunctionLoweringInfo.h"
51 #include "llvm/CodeGen/MachineInstrBuilder.h"
52 #include "llvm/CodeGen/MachineModuleInfo.h"
53 #include "llvm/CodeGen/MachineRegisterInfo.h"
54 #include "llvm/Analysis/DebugInfo.h"
55 #include "llvm/Analysis/Loads.h"
56 #include "llvm/Target/TargetData.h"
57 #include "llvm/Target/TargetInstrInfo.h"
58 #include "llvm/Target/TargetLowering.h"
59 #include "llvm/Target/TargetMachine.h"
60 #include "llvm/Support/ErrorHandling.h"
61 #include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
65 STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
66 "target-independent selector");
67 STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
68 "target-specific selector");
69 STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
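// Note on "local values": constants and other side-effect-free
// materializations are emitted into a dedicated area at the top of the current
// block (delimited by EmitStartPt/LastLocalValue and cached in LocalValueMap),
// so a single materialization can be reused by every later use in the block.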
/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  EmitStartPt = 0;

  // Advance the emit start point past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    EmitStartPt = I;
    ++I;
  }
  LastLocalValue = EmitStartPt;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}
bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}
/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }

  return Reg;
}
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}
/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert (I && E && std::distance(I, E) > 0 && "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}
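// enterLocalValueArea/leaveLocalValueArea bracket the materialization of a
// local value: the insert point is temporarily moved into the local-value area
// at the top of the block and then restored, so the emitted instructions land
// ahead of (and are reusable by) the ordinary instructions of the block.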
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}
343 /// SelectBinaryOp - Select and emit code for a binary operator instruction,
344 /// which has an opcode which directly corresponds to the given ISD opcode.
346 bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
347 EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
348 if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }
367 // Check if the first operand is a constant, and handle it as "ri". At -O0,
368 // we don't have anything that canonicalizes operand order.
369 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
370 if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
371 unsigned Op1 = getRegForValue(I->getOperand(1));
372 if (Op1 == 0) return false;
374 bool Op1IsKill = hasTrivialKill(I->getOperand(1));
376 unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)   // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
393 // Check if the second operand is a constant and handle it appropriately.
394 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
395 uint64_t Imm = CI->getZExtValue();
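    // Note: an "exact" sdiv by a power of two has no remainder by definition,
    // so an arithmetic shift right yields the same result even for negative
    // dividends, which is what makes the transformation below safe.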
    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }
405 unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
406 Op0IsKill, Imm, VT.getSimpleVT());
407 if (ResultReg == 0) return false;
    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }
414 // Check if the second operand is a constant float.
415 if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
416 unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
417 ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }
  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0) {
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += TD.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        // N = N + Offset
        TotalOffs +=
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (N == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}
542 bool FastISel::SelectCall(const User *I) {
543 const CallInst *Call = cast<CallInst>(I);
545 // Handle simple inline asms.
546 if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
547 // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;
551 unsigned ExtraInfo = 0;
552 if (IA->hasSideEffects())
553 ExtraInfo |= InlineAsm::Extra_HasSideEffects;
554 if (IA->isAlignStack())
555 ExtraInfo |= InlineAsm::Extra_IsAlignStack;
557 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
558 TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }
564 const Function *F = Call->getCalledFunction();
565 if (!F) return false;
567 // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
570 case Intrinsic::dbg_declare: {
571 const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
572 if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;
576 const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
      if (Offset)
        Reg = TRI.getFrameRegister(*FuncInfo.MF);
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
598 case Intrinsic::dbg_value: {
599 // This form of DBG_VALUE is target-independent.
600 const DbgValueInst *DI = cast<DbgValueInst>(Call);
601 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
605 // help debugging. Probably the optimizer should not do this.
606 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
607 .addReg(0U).addImm(DI->getOffset())
608 .addMetadata(DI->getVariable());
609 } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
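      // A ConstantInt wider than 64 bits cannot be carried as a plain int64_t
      // immediate, so attach the ConstantInt itself (addCImm) instead.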
610 if (CI->getBitWidth() > 64)
611 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
612 .addCImm(CI).addImm(DI->getOffset())
613 .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
616 .addImm(CI->getZExtValue()).addImm(DI->getOffset())
617 .addMetadata(DI->getVariable());
618 } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
619 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
620 .addFPImm(CF).addImm(DI->getOffset())
621 .addMetadata(DI->getVariable());
622 } else if (unsigned Reg = lookUpRegForValue(V)) {
623 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
624 .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
625 .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
633 case Intrinsic::eh_exception: {
634 EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
      break;
638 assert(FuncInfo.MBB->isLandingPad() &&
639 "Call to eh.exception not in landing pad!");
640 unsigned Reg = TLI.getExceptionAddressRegister();
641 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
642 unsigned ResultReg = createResultReg(RC);
643 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
644 ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
648 case Intrinsic::eh_selector: {
649 EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
652 if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }
663 unsigned Reg = TLI.getExceptionSelectorRegister();
664 EVT SrcVT = TLI.getPointerTy();
665 const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
666 unsigned ResultReg = createResultReg(RC);
667 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
668 ResultReg).addReg(Reg);
670 bool ResultRegIsKill = hasTrivialKill(Call);
672 // Cast the register to the type of the selector.
673 if (SrcVT.bitsGT(MVT::i32))
674 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
675 ResultReg, ResultRegIsKill);
676 else if (SrcVT.bitsLT(MVT::i32))
677 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
678 ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);
    return true;
  }
687 case Intrinsic::objectsize: {
688 ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
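    // llvm.objectsize is lowered to its conservative default here: -1
    // ("unknown, assume maximum") when the second argument is false, and 0
    // ("assume minimum") when it is true.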
689 unsigned long long Res = CI->isZero() ? -1ULL : 0;
690 Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }
699 // Usually, it does not make sense to initialize a value,
700 // make an unrelated function call and use the value, because
701 // it tends to be spilled on the stack. So, we move the pointer
702 // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized
  // appear after the call. It also makes sense to skip intrinsics
705 // since they tend to be inlined.
706 if (!isa<IntrinsicInst>(F))
707 flushLocalValueMap();
  // An arbitrary call. Bail.
  return false;
}
713 bool FastISel::SelectCast(const User *I, unsigned Opcode) {
714 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
715 EVT DstVT = TLI.getValueType(I->getType());
717 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;
  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (InputReg == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
748 bool FastISel::SelectBitCast(const User *I) {
749 // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }
758 // Bitcasts of other values become reg-reg copies or BITCAST operators.
759 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
760 EVT DstVT = TLI.getValueType(I->getType());
762 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
763 DstVT == MVT::Other || !DstVT.isSimple() ||
764 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));
775 // First, try to perform the bitcast by inserting a reg-reg copy.
776 unsigned ResultReg = 0;
777 if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
778 TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
779 TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
780 // Don't attempt a cross-class copy. It will likely fail.
781 if (SrcClass == DstClass) {
782 ResultReg = createResultReg(DstClass);
783 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    ++NumFastIselSuccessIndependent;
    DL = DebugLoc();
    return true;
  }
  // Remove dead code. However, ignore call instructions since we've flushed
  // the local value map and recomputed the insert point.
  if (!isa<CallInst>(I)) {
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
  }

  // Next, try calling the target to attempt to handle the instruction.
  SavedInsertPt = FuncInfo.InsertPt;
  if (TargetSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DL = DebugLoc();
    return true;
  }
  // Check for dead code and remove as necessary.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DL = DebugLoc();
  return false;
}
/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}
/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);
866 // If the target has ISD::FNEG, use it.
867 EVT VT = TLI.getValueType(I->getType());
868 unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
869 ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
875 // Bitcast the value to integer, twiddle the sign bit with xor,
876 // and then bitcast it back to floating-point.
877 if (VT.getSizeInBits() > 64) return false;
878 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;
882 unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;
887 unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
888 IntReg, /*Kill=*/true,
889 UINT64_C(1) << (VT.getSizeInBits()-1),
890 IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;
894 ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;
909 // Make sure we only try to handle extracts with a legal result. But also
910 // allow i1 because it's easy.
911 EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;
918 const Value *Op0 = EVI->getOperand(0);
919 Type *AggTy = Op0->getType();
  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment
931 // Get the actual result register, which is an offset from the base register.
932 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
934 SmallVector<EVT, 4> AggValueVTs;
935 ComputeValueVTs(TLI, AggTy, AggValueVTs);
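  // An aggregate member may itself be lowered to several registers (e.g. i64
  // on a 32-bit target), so advance the result register by the register count
  // of every member preceding the extracted one.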
937 for (unsigned i = 0; i < VTIndex; i++)
938 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
  UpdateValueMap(EVI, ResultReg);
  return true;
}

bool
FastISel::SelectInsertValue(const User *U) {
  const InsertValueInst *IVI = dyn_cast<InsertValueInst>(U);
  if (!IVI)
    return false;
  // Only try to handle inserts of legal types. But also allow i16/i8/i1
  // because they're easy.
952 const Value *Val = IVI->getOperand(1);
953 Type *ValTy = Val->getType();
954 EVT ValVT = TLI.getValueType(ValTy, /*AllowUnknown=*/true);
  if (!ValVT.isSimple())
    return false;
  MVT VT = ValVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return false;
961 // Get the Val register.
962 unsigned ValReg = getRegForValue(Val);
963 if (ValReg == 0) return false;
965 const Value *Agg = IVI->getOperand(0);
966 Type *AggTy = Agg->getType();
  // TODO: Is there a better way to do this? For each insertvalue we allocate
  // a new set of virtual registers, which results in a large number of
  // loads/stores from/to the stack that copy the aggregate all over the place
  // and results in lots of spill code. I believe this is necessary to preserve
  // SSA form, but maybe there's something we could do to improve this.
  // Get the Aggregate base register.
  unsigned AggBaseReg;
976 DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Agg);
977 if (I != FuncInfo.ValueMap.end())
978 AggBaseReg = I->second;
979 else if (isa<Instruction>(Agg))
980 AggBaseReg = FuncInfo.InitializeRegForValue(Agg);
981 else if (isa<UndefValue>(Agg))
    // In this case we don't need to allocate a new set of registers that will
983 // never be defined. Just copy Val into the proper result registers.
986 return false; // fast-isel can't handle aggregate constants at the moment
988 // Create result register(s).
989 unsigned ResultBaseReg = FuncInfo.CreateRegs(AggTy);
991 // Get the actual result register, which is an offset from the base register.
992 unsigned LinearIndex = ComputeLinearIndex(Agg->getType(), IVI->getIndices());
994 SmallVector<EVT, 4> AggValueVTs;
995 ComputeValueVTs(TLI, AggTy, AggValueVTs);
997 // Copy the beginning value(s) from the original aggregate.
1000 unsigned BaseRegOff = 0;
1002 for (; i != LinearIndex; ++i) {
    unsigned NRE = TLI.getNumRegisters(FuncInfo.Fn->getContext(),
                                       AggValueVTs[i]);
1005 for (unsigned NRI = 0; NRI != NRE; NRI++) {
1007 SrcReg = AggBaseReg + BaseRegOff + NRI;
1008 DestReg = ResultBaseReg + BaseRegOff + NRI;
1009 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1010 DestReg).addReg(SrcReg);
1016 // FIXME: Handle aggregate inserts. Haven't seen these in practice, but..
1017 // Copy value(s) from the inserted value(s).
1018 DestReg = ResultBaseReg + BaseRegOff;
1019 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1020 DestReg).addReg(ValReg);
1024 // Copy remaining value(s) from the original aggregate.
1026 for (unsigned NumAggValues = AggValueVTs.size(); i != NumAggValues; ++i) {
    unsigned NRE = TLI.getNumRegisters(FuncInfo.Fn->getContext(),
                                       AggValueVTs[i]);
1029 for (unsigned NRI = 0; NRI != NRE; NRI++) {
1030 SrcReg = AggBaseReg + BaseRegOff + NRI;
1031 DestReg = ResultBaseReg + BaseRegOff + NRI;
1032 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1033 DestReg).addReg(SrcReg);
  UpdateValueMap(IVI, ResultBaseReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
1047 return SelectBinaryOp(I, ISD::ADD);
1048 case Instruction::FAdd:
1049 return SelectBinaryOp(I, ISD::FADD);
1050 case Instruction::Sub:
1051 return SelectBinaryOp(I, ISD::SUB);
1052 case Instruction::FSub:
1053 // FNeg is currently represented in LLVM IR as a special case of FSub.
1054 if (BinaryOperator::isFNeg(I))
1055 return SelectFNeg(I);
1056 return SelectBinaryOp(I, ISD::FSUB);
1057 case Instruction::Mul:
1058 return SelectBinaryOp(I, ISD::MUL);
1059 case Instruction::FMul:
1060 return SelectBinaryOp(I, ISD::FMUL);
1061 case Instruction::SDiv:
1062 return SelectBinaryOp(I, ISD::SDIV);
1063 case Instruction::UDiv:
1064 return SelectBinaryOp(I, ISD::UDIV);
1065 case Instruction::FDiv:
1066 return SelectBinaryOp(I, ISD::FDIV);
1067 case Instruction::SRem:
1068 return SelectBinaryOp(I, ISD::SREM);
1069 case Instruction::URem:
1070 return SelectBinaryOp(I, ISD::UREM);
1071 case Instruction::FRem:
1072 return SelectBinaryOp(I, ISD::FREM);
1073 case Instruction::Shl:
1074 return SelectBinaryOp(I, ISD::SHL);
1075 case Instruction::LShr:
1076 return SelectBinaryOp(I, ISD::SRL);
1077 case Instruction::AShr:
1078 return SelectBinaryOp(I, ISD::SRA);
1079 case Instruction::And:
1080 return SelectBinaryOp(I, ISD::AND);
1081 case Instruction::Or:
1082 return SelectBinaryOp(I, ISD::OR);
1083 case Instruction::Xor:
1084 return SelectBinaryOp(I, ISD::XOR);
1086 case Instruction::GetElementPtr:
1087 return SelectGetElementPtr(I);
1089 case Instruction::Br: {
1090 const BranchInst *BI = cast<BranchInst>(I);
    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }
  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;
1116 case Instruction::Call:
1117 return SelectCall(I);
1119 case Instruction::BitCast:
1120 return SelectBitCast(I);
1122 case Instruction::FPToSI:
1123 return SelectCast(I, ISD::FP_TO_SINT);
1124 case Instruction::ZExt:
1125 return SelectCast(I, ISD::ZERO_EXTEND);
1126 case Instruction::SExt:
1127 return SelectCast(I, ISD::SIGN_EXTEND);
1128 case Instruction::Trunc:
1129 return SelectCast(I, ISD::TRUNCATE);
1130 case Instruction::SIToFP:
1131 return SelectCast(I, ISD::SINT_TO_FP);
1133 case Instruction::IntToPtr: // Deliberate fall-through.
1134 case Instruction::PtrToInt: {
1135 EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
1136 EVT DstVT = TLI.getValueType(I->getType());
1137 if (DstVT.bitsGT(SrcVT))
1138 return SelectCast(I, ISD::ZERO_EXTEND);
1139 if (DstVT.bitsLT(SrcVT))
1140 return SelectCast(I, ISD::TRUNCATE);
1141 unsigned Reg = getRegForValue(I->getOperand(0));
1142 if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }
1147 case Instruction::ExtractValue:
1148 return SelectExtractValue(I);
1150 case Instruction::InsertValue:
1151 return SelectInsertValue(I);
  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
1162 FastISel::FastISel(FunctionLoweringInfo &funcInfo)
1163 : FuncInfo(funcInfo),
1164 MRI(FuncInfo.MF->getRegInfo()),
1165 MFI(*FuncInfo.MF->getFrameInfo()),
1166 MCP(*FuncInfo.MF->getConstantPool()),
1167 TM(FuncInfo.MF->getTarget()),
1168 TD(*TM.getTargetData()),
1169 TII(*TM.getInstrInfo()),
1170 TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}
1174 FastISel::~FastISel() {}
1176 unsigned FastISel::FastEmit_(MVT, MVT,
1181 unsigned FastISel::FastEmit_r(MVT, MVT,
1183 unsigned /*Op0*/, bool /*Op0IsKill*/) {
1187 unsigned FastISel::FastEmit_rr(MVT, MVT,
1189 unsigned /*Op0*/, bool /*Op0IsKill*/,
1190 unsigned /*Op1*/, bool /*Op1IsKill*/) {
1194 unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1198 unsigned FastISel::FastEmit_f(MVT, MVT,
1199 unsigned, const ConstantFP * /*FPImm*/) {
1203 unsigned FastISel::FastEmit_ri(MVT, MVT,
1205 unsigned /*Op0*/, bool /*Op0IsKill*/,
1210 unsigned FastISel::FastEmit_rf(MVT, MVT,
1212 unsigned /*Op0*/, bool /*Op0IsKill*/,
1213 const ConstantFP * /*FPImm*/) {
1217 unsigned FastISel::FastEmit_rri(MVT, MVT,
1219 unsigned /*Op0*/, bool /*Op0IsKill*/,
1220 unsigned /*Op1*/, bool /*Op1IsKill*/,
/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
1229 unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
1230 unsigned Op0, bool Op0IsKill,
1231 uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
1265 unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}
1269 unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
1270 const TargetRegisterClass* RC) {
1271 unsigned ResultReg = createResultReg(RC);
1272 const MCInstrDesc &II = TII.get(MachineInstOpcode);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}
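// Note on the FastEmitInst_* helpers below: when the machine instruction has
// an explicit def, the result is emitted directly into ResultReg; otherwise
// the instruction is emitted first and its implicit def is copied into
// ResultReg with a COPY.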
unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
1297 unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
1298 const TargetRegisterClass *RC,
1299 unsigned Op0, bool Op0IsKill,
1300 unsigned Op1, bool Op1IsKill) {
1301 unsigned ResultReg = createResultReg(RC);
1302 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1304 if (II.getNumDefs() >= 1)
1305 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
1306 .addReg(Op0, Op0IsKill * RegState::Kill)
1307 .addReg(Op1, Op1IsKill * RegState::Kill);
1309 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
1310 .addReg(Op0, Op0IsKill * RegState::Kill)
1311 .addReg(Op1, Op1IsKill * RegState::Kill);
1312 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1313 ResultReg).addReg(II.ImplicitDefs[0]);
1318 unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
1319 const TargetRegisterClass *RC,
1320 unsigned Op0, bool Op0IsKill,
1321 unsigned Op1, bool Op1IsKill,
1322 unsigned Op2, bool Op2IsKill) {
1323 unsigned ResultReg = createResultReg(RC);
1324 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1326 if (II.getNumDefs() >= 1)
1327 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
1328 .addReg(Op0, Op0IsKill * RegState::Kill)
1329 .addReg(Op1, Op1IsKill * RegState::Kill)
1330 .addReg(Op2, Op2IsKill * RegState::Kill);
1332 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
1333 .addReg(Op0, Op0IsKill * RegState::Kill)
1334 .addReg(Op1, Op1IsKill * RegState::Kill)
1335 .addReg(Op2, Op2IsKill * RegState::Kill);
1336 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1337 ResultReg).addReg(II.ImplicitDefs[0]);
1342 unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
1343 const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
1347 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1349 if (II.getNumDefs() >= 1)
1350 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
1351 .addReg(Op0, Op0IsKill * RegState::Kill)
1354 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
1355 .addReg(Op0, Op0IsKill * RegState::Kill)
1357 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1358 ResultReg).addReg(II.ImplicitDefs[0]);
1363 unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
1364 const TargetRegisterClass *RC,
1365 unsigned Op0, bool Op0IsKill,
1366 uint64_t Imm1, uint64_t Imm2) {
1367 unsigned ResultReg = createResultReg(RC);
1368 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1370 if (II.getNumDefs() >= 1)
1371 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
1372 .addReg(Op0, Op0IsKill * RegState::Kill)
1376 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
1377 .addReg(Op0, Op0IsKill * RegState::Kill)
1380 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1381 ResultReg).addReg(II.ImplicitDefs[0]);
1386 unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
1387 const TargetRegisterClass *RC,
1388 unsigned Op0, bool Op0IsKill,
1389 const ConstantFP *FPImm) {
1390 unsigned ResultReg = createResultReg(RC);
1391 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1393 if (II.getNumDefs() >= 1)
1394 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
1395 .addReg(Op0, Op0IsKill * RegState::Kill)
1398 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
1399 .addReg(Op0, Op0IsKill * RegState::Kill)
1401 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1402 ResultReg).addReg(II.ImplicitDefs[0]);
1407 unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
1408 const TargetRegisterClass *RC,
1409 unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
1413 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1415 if (II.getNumDefs() >= 1)
1416 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
1417 .addReg(Op0, Op0IsKill * RegState::Kill)
1418 .addReg(Op1, Op1IsKill * RegState::Kill)
1421 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
1422 .addReg(Op0, Op0IsKill * RegState::Kill)
1423 .addReg(Op1, Op1IsKill * RegState::Kill)
1425 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1426 ResultReg).addReg(II.ImplicitDefs[0]);
1431 unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
1435 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1437 if (II.getNumDefs() >= 1)
1438 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
1440 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
1441 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1442 ResultReg).addReg(II.ImplicitDefs[0]);
1447 unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
1448 const TargetRegisterClass *RC,
1449 uint64_t Imm1, uint64_t Imm2) {
1450 unsigned ResultReg = createResultReg(RC);
1451 const MCInstrDesc &II = TII.get(MachineInstOpcode);
1453 if (II.getNumDefs() >= 1)
1454 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
1455 .addImm(Imm1).addImm(Imm2);
1457 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
1458 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1459 ResultReg).addReg(II.ImplicitDefs[0]);
1464 unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
1468 assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
1469 "Cannot yet extract from physregs");
1470 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
1471 DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
1476 /// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
1477 /// with all but the least significant bit set to zero.
1478 unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
1482 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
1483 /// Emit code to ensure constants are copied into registers when needed.
1484 /// Remember the virtual registers that need to be added to the Machine PHI
1485 /// nodes as input. We cannot just directly add them, because expansion
1486 /// might result in multiple MBB's for one BB. As such, the start of the
1487 /// BB might correspond to a different MBB than the end.
1488 bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
1489 const TerminatorInst *TI = LLVMBB->getTerminator();
1491 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
1492 unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
1496 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
1497 const BasicBlock *SuccBB = TI->getSuccessor(succ);
1498 if (!isa<PHINode>(SuccBB->begin())) continue;
1499 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
1501 // If this terminator has multiple identical successors (common for
1502 // switches), only handle each succ once.
1503 if (!SuccsHandled.insert(SuccMBB)) continue;
1505 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
1507 // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
1510 for (BasicBlock::const_iterator I = SuccBB->begin();
1511 const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
1513 // Ignore dead phi's.
1514 if (PN->use_empty()) continue;
1516 // Only handle legal types. Two interesting things to note here. First,
1517 // by bailing out early, we may leave behind some dead instructions,
1518 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
1519 // own moves. Second, this check is necessary because FastISel doesn't
1520 // use CreateRegs to create registers, so it always creates
1521 // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }
1533 const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
1535 // Set the DebugLoc for the copy. Prefer the location of the operand
1536 // if there is one; use the location of the PHI otherwise.
1537 DL = PN->getDebugLoc();
1538 if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
1539 DL = Inst->getDebugLoc();
      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}