//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
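//
// For example, given IR like "%y = add i32 %x, 42", fast-isel can usually
// fold the 42 into the immediate field of a single add instruction instead
// of first materializing it in a register of its own.
//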
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
40 //===----------------------------------------------------------------------===//
42 #include "llvm/Function.h"
43 #include "llvm/GlobalVariable.h"
44 #include "llvm/Instructions.h"
45 #include "llvm/IntrinsicInst.h"
46 #include "llvm/Operator.h"
47 #include "llvm/CodeGen/FastISel.h"
48 #include "llvm/CodeGen/FunctionLoweringInfo.h"
49 #include "llvm/CodeGen/MachineInstrBuilder.h"
50 #include "llvm/CodeGen/MachineModuleInfo.h"
51 #include "llvm/CodeGen/MachineRegisterInfo.h"
52 #include "llvm/Analysis/DebugInfo.h"
53 #include "llvm/Analysis/Loads.h"
54 #include "llvm/Target/TargetData.h"
55 #include "llvm/Target/TargetInstrInfo.h"
56 #include "llvm/Target/TargetLowering.h"
57 #include "llvm/Target/TargetMachine.h"
58 #include "llvm/Support/ErrorHandling.h"
59 #include "llvm/Support/Debug.h"

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}
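
/// hasTrivialKill - Test whether the given value has exactly one use in the
/// same basic block, so that its sole use can be flagged as a kill without
/// any liveness analysis. For example, in
///   %c = add i32 %a, %b
///   %d = mul i32 %c, %c
/// %c is used twice, so it does not have a trivial kill.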
bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}
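
/// getRegForValue - Look up or create the virtual register that holds the
/// given LLVM IR value, materializing constants and other non-instruction
/// values on demand in the local value area.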
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}
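
/// lookUpRegForValue - Like getRegForValue, but never emits new instructions;
/// it returns the register already assigned to the value, or 0 if there is
/// none.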
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    FuncInfo.RegFixups[AssignedReg] = Reg;
    AssignedReg = Reg;
  }

  return AssignedReg;
}
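
/// getRegForGEPIndex - This is a wrapper around getRegForValue that also
/// takes care of truncating or sign-extending the given getelementptr index
/// value to the pointer width.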
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}
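
/// enterLocalValueArea - Prepare InsertPt to begin inserting instructions
/// into the local value area and return the old insert position.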
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}
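
/// SelectGetElementPtr - Lower a getelementptr by walking the indices and
/// emitting the address arithmetic directly. For example, "getelementptr
/// i32* %p, i32 %i" computes, in effect, %p + %i * 4 (assuming a 4-byte i32).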
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}
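
/// SelectCall - Lower a call instruction. Only simple inline asms and a few
/// intrinsics (dbg.declare, dbg.value, eh.exception, eh.selector) are handled
/// here; other calls are left for target-specific or SelectionDAG selection.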
bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addImm(CI->getZExtValue()).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(Call);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);

    return true;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg, InputRegIsKill);
    if (!InputReg)
      return false;
    InputRegIsKill = true;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
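
/// SelectBitCast - Lower a bitcast. If the type is unchanged, the operand's
/// register is reused; otherwise a reg-reg copy is tried first, and failing
/// that, an ISD::BITCAST node.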
bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
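  // For example, an f64 fneg becomes an i64 xor with 0x8000000000000000 (the
  // sign bit), followed by a bitcast back to f64.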
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}
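
// The FastEmit_* methods above are the target-independent defaults: they
// simply fail. Targets override them (typically with TableGen-generated
// helpers) to emit an instruction and return the result register, or 0 on
// failure.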

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
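
/// createResultReg - Allocate a fresh virtual register from the given
/// register class to hold an instruction result.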
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}