//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
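//
// For illustration (not an exhaustive description of any target's support):
// an IR instruction like "%c = add i32 %a, %b" can usually be selected
// directly through the FastEmit_rr hook below, while an operation on an
// illegal type, e.g. an i64 add on a 32-bit target, typically causes
// fast-isel to bail and defer to SelectionDAG.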
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Start out as null, meaning no local-value instructions have
  // been emitted.
  LastLocalValue = 0;

  // Advance the last local value past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    LastLocalValue = I;
    ++I;
  }
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}
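
// Illustrative example of the rule above: given
//   %x = add i32 %a, %b
//   br label %next
// where the only use of %x is in block %next, %x has no trivial kill here,
// because its single use is in a different basic block than its definition.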

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}
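
// For example (a hypothetical query): the first getRegForValue() call for
// the constant "i32 7" in a block finds no cached register, enters the
// local-value area, materializes the constant there, and caches the result
// in LocalValueMap; later queries in the same block return the same vreg.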

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}
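
// A sketch of what materialization can produce: an i32 constant 42 reduces
// to FastEmit_i(MVT::i32, MVT::i32, ISD::Constant, 42), and a ConstantFP
// like 2.0 that FastEmit_f rejects may instead be emitted as the integer 2
// followed by an ISD::SINT_TO_FP, which is only done when the
// float-to-integer conversion above is exact.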

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}
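
// Hypothetical scenario for the fixup path above: if a block selected
// earlier already assigned vreg %vr1 to this instruction's value, and
// selecting the defining block later produces %vr5, RegFixups records
// %vr1 -> %vr5 so the earlier use is rewritten instead of inserting a copy.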

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
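
// For example (illustrative): on a target where pointers are i64, an i32
// GEP index is widened with FastEmit_r(MVT::i32, MVT::i64, ISD::SIGN_EXTEND,
// IdxN, IdxNIsKill) before it participates in address arithmetic.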

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

/// recomputeDebugLocForMaterializedRegs - Recompute the debug location for
/// the very first instruction in a basic block. Instructions emitted to
/// materialize registers do not have location information (see
/// enterLocalValueArea()), because they may not be emitted at the right
/// place.
void FastISel::recomputeDebugLocForMaterializedRegs() {
  if (!getLastLocalValue())
    return;

  MachineInstr *First = FuncInfo.MBB->getFirstNonPHI();
  if (!First->getDebugLoc().isUnknown())
    return;

  for (MachineBasicBlock::iterator I = FuncInfo.MBB->begin(),
       E = FuncInfo.MBB->end(); I != E; ++I) {
    DebugLoc DL = I->getDebugLoc();
    if (!DL.isUnknown()) {
      First->setDebugLoc(DL);
      return;
    }
  }
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  // Instructions materialized here get no debug location; see
  // recomputeDebugLocForMaterializedRegs().
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}
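
// A worked example of the loop above (assuming a 32-bit target): for
//   getelementptr {i32, [4 x i32]}* %p, i32 0, i32 1, i32 %i
// the first index is zero and emits nothing; the struct field index adds
// the constant offset 4; and the array index emits roughly
//   t = mul %i, 4        ; element size of i32
//   N = add N, t
// via FastEmit_ri_ and FastEmit_rr.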

bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(Call);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);

    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

/// SelectInstruction - Do "fast" instruction selection for the given
/// LLVM IR instruction, and append the generated machine instructions to
/// the current block. Return true if selection was successful.
bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}
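
// The fallback above amounts to this (illustrative, for f32):
//   bits   = bitcast f32 value to i32
//   bits   = xor bits, 0x80000000   ; flip the IEEE sign bit
//   result = bitcast i32 bits to f32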

bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}
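
// For example (a sketch): extracting index 1 from an {i32, i64} aggregate
// on a target where i32 needs one register yields ResultReg = BaseReg + 1,
// since the i32 member ahead of it occupies a single register.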

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}
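
// For example (illustrative): FastEmit_ri_(MVT::i32, ISD::MUL, Op0,
// /*Kill=*/true, 8, MVT::i32) is rewritten above into an ISD::SHL by
// Log2_64(8) == 3 before the ri form is attempted.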

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
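
// For example: an i1 value living in an i32 register is zero-extended by
// emitting "and reg, 1", since only the low bit is meaningful.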

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}