//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  EmitStartPt = 0;

  // Advance the emit start point past any EH_LABEL instructions.
  MachineBasicBlock::iterator
    I = FuncInfo.MBB->begin(), E = FuncInfo.MBB->end();
  while (I != E && I->getOpcode() == TargetOpcode::EH_LABEL) {
    EmitStartPt = I;
    ++I;
  }
  LastLocalValue = EmitStartPt;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}

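/// hasTrivialKill - Test whether the given value has exactly one use in the
/// block that defines it, so that its register can safely be marked killed at
/// that use; no-op casts and pointer/integer casts are handled conservatively.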
bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;

  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DL;
  recomputeInsertPt();
  DL = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = llvm::prior(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DL = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)   // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

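/// SelectGetElementPtr - Select a getelementptr by walking its indices,
/// folding constant offsets into immediate adds and emitting MUL/ADD
/// instructions for variable indices scaled by the element size.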
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

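/// SelectCall - Select call instructions that "fast" selection understands:
/// simple inline asm and a handful of intrinsics. Any other call returns
/// false so it can be lowered by the SelectionDAG path.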
bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  const Function *F = Call->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  switch (F->getIntrinsicID()) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(Call);
    if (!DIVariable(DI->getVariable()).Verify() ||
        !FuncInfo.MF->getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address) || isa<AllocaInst>(Address))
      return true;

    unsigned Reg = 0;
    unsigned Offset = 0;
    if (const Argument *Arg = dyn_cast<Argument>(Address)) {
      if (Arg->hasByValAttr()) {
        // Byval arguments' frame index is recorded during argument lowering.
        // Use this info directly.
        Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
        if (Offset)
          Reg = TRI.getFrameRegister(*FuncInfo.MF);
      }
    }
    if (!Reg)
      Reg = getRegForValue(Address);

    if (Reg)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_VALUE))
        .addReg(Reg, RegState::Debug).addImm(Offset)
        .addMetadata(DI->getVariable());
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(Call);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addReg(Reg, RegState::Debug).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << DI);
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)!=TargetLowering::Expand)
      break;

    assert(FuncInfo.MBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    unsigned Reg = TLI.getExceptionAddressRegister();
    const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(Call->getType());
    if (TLI.getOperationAction(ISD::EHSELECTION, VT) != TargetLowering::Expand)
      break;
    if (FuncInfo.MBB->isLandingPad())
      AddCatchInfo(*Call, &FuncInfo.MF->getMMI(), FuncInfo.MBB);
    else {
#ifndef NDEBUG
      FuncInfo.CatchInfoLost.insert(Call);
#endif
      // FIXME: Mark exception selector register as live in. Hack for PR1508.
      unsigned Reg = TLI.getExceptionSelectorRegister();
      if (Reg) FuncInfo.MBB->addLiveIn(Reg);
    }

    unsigned Reg = TLI.getExceptionSelectorRegister();
    EVT SrcVT = TLI.getPointerTy();
    const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(Reg);

    bool ResultRegIsKill = hasTrivialKill(Call);

    // Cast the register to the type of the selector.
    if (SrcVT.bitsGT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                             ResultReg, ResultRegIsKill);
    else if (SrcVT.bitsLT(MVT::i32))
      ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                             ISD::SIGN_EXTEND, ResultReg, ResultRegIsKill);
    if (ResultReg == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;

    UpdateValueMap(Call, ResultReg);

    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(Call->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(Call->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(Call, ResultReg);
    return true;
  }
  }

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  if (!isa<IntrinsicInst>(F))
    flushLocalValueMap();

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

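/// SelectBitCast - Select a bitcast, preferring a plain register-to-register
/// COPY when the source and destination map to the same register class, and
/// falling back to emitting an ISD::BITCAST otherwise.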
bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
              ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

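/// SelectInstruction - Select the given instruction, trying target-independent
/// selection first and the target-specific hook second. Returns true if the
/// instruction was successfully selected.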
bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
  if (FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
                     SmallVector<MachineOperand, 0>(), DL);
  }
  FuncInfo.MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

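/// SelectOperator - Dispatch on the LLVM opcode and forward to the matching
/// Select* helper; returns false for anything "fast" selection does not
/// handle so the caller can bail out to SelectionDAG.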
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);
  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()) {
}

FastISel::~FastISel() {}

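// Default implementations of the FastEmit_* hooks follow. A target overrides
// the forms it can handle; returning zero from any of them means the
// requested operation could not be emitted and the caller must fall back.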
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

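/// createResultReg - Allocate a fresh virtual register of the given register
/// class to hold the result of an emitted instruction.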
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

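// The FastEmitInst_* helpers below construct a MachineInstr for a specific
// machine opcode. If the opcode has at least one explicit def, the result is
// placed directly in the new virtual register; otherwise the result lands in
// the instruction's first implicit def, and an extra COPY moves it into
// ResultReg.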
unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DL, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DL = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DL = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DL = DebugLoc();
    }
  }

  return true;
}