//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles of large amounts of code.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
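
// A minimal sketch of the trade-off described above, assuming a 32-bit
// target where i64 is an illegal type:
//
//   %r = add i32 %a, %b   ; legal type: selected directly by fast-isel
//   %s = add i64 %x, %y   ; illegal type: selection fails gracefully and
//                         ; the SelectionDAG selector lowers it instead
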
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"
using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  EmitStartPt = 0;

  // Advance the emit start point past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    EmitStartPt = I;
    ++I;
  }
  LastLocalValue = EmitStartPt;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}

bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

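// For example (a sketch, assuming no intervening no-op casts): in the IR
// below, %tmp has exactly one use and that use is in the same basic block,
// so hasTrivialKill(%tmp) returns true and the register holding %tmp can be
// marked killed at its use:
//
//   %tmp = add i32 %a, 1
//   %res = mul i32 %tmp, %b   ; sole use, same block => trivial kill
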
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

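// Example of the constant paths above (a sketch): "i8* null" is materialized
// as an integer zero of pointer width so it can be local-CSE'd with other
// zeros, and a ConstantFP such as "double 2.0" with no direct encoding may
// be emitted as the integer 2 followed by a SINT_TO_FP, since 2.0 converts
// to 2 exactly under rmTowardZero (isExact is true).
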
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

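// Example (a sketch, assuming a target with 32-bit pointers): an "i16 %idx"
// GEP index is sign-extended from i16 to i32 before use, an "i64 %idx" index
// is truncated to i32, and an index already of pointer width is used as-is.
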
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

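// Example of the "sdiv exact" strength reduction above (a sketch):
//
//   %q = sdiv exact i32 %x, 8
//
// becomes an arithmetic shift, FastEmit_ri_(..., ISD::SRA, ..., 3, ...).
// This is only valid because the "exact" flag promises there is no
// remainder to round.
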
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

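// Worked example (a sketch, assuming i32 occupies 4 bytes): lowering
//
//   %p = getelementptr { i32, i32 }* %s, i32 0, i32 1
//
// the first index is a zero constant subscript and is skipped; the second
// selects field 1 of the struct, so the loop emits a single
// FastEmit_ri_(VT, ISD::ADD, N, ..., /*Offs=*/4, VT) -- one add of the
// constant struct-layout offset.
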
bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
      if (Offset)
        Reg = TRI.getFrameRegister(*FuncInfo.MF);
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(Call);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);

    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized,
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  if (!isa<IntrinsicInst>(F))
    flushLocalValueMap();

  // An arbitrary call. Bail.
  return false;
}

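// Sketch of why the local value map is flushed across calls: a constant
// materialized early in the block but first used after "call void @f()"
// would be live across the call and would likely be spilled. Clearing
// LocalValueMap and resetting the insertion point lets such constants be
// re-materialized at their next use, after the call.
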
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

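// Example (a sketch): an i32-to-f32 bitcast, where both types are legal and
// the same width, first tries a plain COPY if both values live in the same
// register class; on a target with separate integer and FP register files
// it falls back to FastEmit_r(..., ISD::BITCAST, ...) instead.
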
bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

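// Worked example of the integer fallback above (a sketch, for f64): the
// value is bitcast to i64, the sign bit is flipped with
// "xor i64 %bits, 0x8000000000000000" (UINT64_C(1) << 63), and the result
// is bitcast back to f64 -- negation without any FP arithmetic.
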
bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

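// Example of the register arithmetic above (a sketch): for
//
//   %v = extractvalue { i32, i64 } %agg, 1
//
// ComputeLinearIndex returns 1, so ResultReg is advanced past however many
// registers the i32 member occupies, landing on the first register that
// holds the i64 member.
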
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

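// Example of the strength reduction above (a sketch): "mul x, 8" is emitted
// as "shl x, 3" and "udiv x, 8" as "srl x, 3"; a shift amount of 32 or more
// on an i32 value is rejected outright by the in-range check.
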
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

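// Note on the pattern above (a sketch): some machine instructions define
// their result only as an implicit physical register, i.e. they have no
// explicit defs. In that case the instruction is emitted as-is and the
// result is then transferred into the virtual ResultReg with a COPY from
// II.ImplicitDefs[0], the first implicitly defined physical register. The
// same pattern repeats in the FastEmitInst_* helpers below.
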
unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

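// Example (a sketch): an i1 held in a 32-bit register may have garbage in
// its upper 31 bits; "and reg, 1" clears them, which is exactly the
// FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1) call above.
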
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));