//===-- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/Instructions.h"
15 #include "llvm/CodeGen/FastISel.h"
16 #include "llvm/CodeGen/MachineInstrBuilder.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/Target/TargetData.h"
19 #include "llvm/Target/TargetInstrInfo.h"
20 #include "llvm/Target/TargetLowering.h"
21 #include "llvm/Target/TargetMachine.h"
24 unsigned FastISel::getRegForValue(Value *V) {
25 // Look up the value to see if we already have a register for it. We
26 // cache values defined by Instructions across blocks, and other values
27 // only locally. This is because Instructions already have the SSA
28 // def-dominatess-use requirement enforced.
29 if (ValueMap.count(V))
31 unsigned Reg = LocalValueMap[V];
35 MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT();
37 // Ignore illegal types.
38 if (!TLI.isTypeLegal(VT)) {
39 // Promote MVT::i1 to a legal type though, because it's common and easy.
41 VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
46 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
47 if (CI->getValue().getActiveBits() <= 64)
48 Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
49 } else if (isa<AllocaInst>(V)) {
50 Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
51 } else if (isa<ConstantPointerNull>(V)) {
52 Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
53 } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
54 Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
57 const APFloat &Flt = CF->getValueAPF();
58 MVT IntVT = TLI.getPointerTy();
61 uint32_t IntBitWidth = IntVT.getSizeInBits();
62 if (!Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
63 APFloat::rmTowardZero) != APFloat::opOK) {
64 APInt IntVal(IntBitWidth, 2, x);
66 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
67 ISD::Constant, IntVal.getZExtValue());
69 Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
72 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
73 if (!SelectOperator(CE, CE->getOpcode())) return 0;
74 Reg = LocalValueMap[CE];
75 } else if (isa<UndefValue>(V)) {
76 Reg = createResultReg(TLI.getRegClassFor(VT));
77 BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
80 // If target-independent code couldn't handle the value, give target-specific
82 if (!Reg && isa<Constant>(V))
83 Reg = TargetMaterializeConstant(cast<Constant>(V));
85 // Don't cache constant materializations in the general ValueMap.
86 // To do so would require tracking what uses they dominate.
88 LocalValueMap[V] = Reg;
92 unsigned FastISel::lookUpRegForValue(Value *V) {
93 // Look up the value to see if we already have a register for it. We
94 // cache values defined by Instructions across blocks, and other values
95 // only locally. This is because Instructions already have the SSA
96 // def-dominatess-use requirement enforced.
97 if (ValueMap.count(V))
99 return LocalValueMap[V];
102 /// UpdateValueMap - Update the value map to include the new mapping for this
103 /// instruction, or insert an extra copy to get the result in a previous
104 /// determined register.
105 /// NOTE: This is only necessary because we might select a block that uses
106 /// a value before we select the block that defines the value. It might be
107 /// possible to fix this by selecting blocks in reverse postorder.
108 void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
109 if (!isa<Instruction>(I)) {
110 LocalValueMap[I] = Reg;
113 if (!ValueMap.count(I))
116 TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
117 Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
120 /// SelectBinaryOp - Select and emit code for a binary operator instruction,
121 /// which has an opcode which directly corresponds to the given ISD opcode.
123 bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
124 MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
125 if (VT == MVT::Other || !VT.isSimple())
126 // Unhandled type. Halt "fast" selection and bail.
129 // We only handle legal types. For example, on x86-32 the instruction
130 // selector contains all of the 64-bit instructions from x86-64,
131 // under the assumption that i64 won't be used if the target doesn't
133 if (!TLI.isTypeLegal(VT)) {
134 // MVT::i1 is special. Allow AND and OR (but not XOR) because they
135 // don't require additional zeroing, which makes them easy.
137 (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR))
138 VT = TLI.getTypeToTransformTo(VT);
143 unsigned Op0 = getRegForValue(I->getOperand(0));
145 // Unhandled operand. Halt "fast" selection and bail.
148 // Check if the second operand is a constant and handle it appropriately.
149 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
150 unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
151 ISDOpcode, Op0, CI->getZExtValue());
152 if (ResultReg != 0) {
153 // We successfully emitted code for the given LLVM Instruction.
154 UpdateValueMap(I, ResultReg);
159 // Check if the second operand is a constant float.
160 if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
161 unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
163 if (ResultReg != 0) {
164 // We successfully emitted code for the given LLVM Instruction.
165 UpdateValueMap(I, ResultReg);
170 unsigned Op1 = getRegForValue(I->getOperand(1));
172 // Unhandled operand. Halt "fast" selection and bail.
175 // Now we have both operands in registers. Emit the instruction.
176 unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
177 ISDOpcode, Op0, Op1);
179 // Target-specific code wasn't able to find a machine opcode for
180 // the given ISD opcode and type. Halt "fast" selection and bail.
183 // We successfully emitted code for the given LLVM Instruction.
184 UpdateValueMap(I, ResultReg);
188 bool FastISel::SelectGetElementPtr(User *I) {
189 unsigned N = getRegForValue(I->getOperand(0));
191 // Unhandled operand. Halt "fast" selection and bail.
194 const Type *Ty = I->getOperand(0)->getType();
195 MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
196 for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
199 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
200 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
203 uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
204 // FIXME: This can be optimized by combining the add with a
206 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
208 // Unhandled operand. Halt "fast" selection and bail.
211 Ty = StTy->getElementType(Field);
213 Ty = cast<SequentialType>(Ty)->getElementType();
215 // If this is a constant subscript, handle it quickly.
216 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
217 if (CI->getZExtValue() == 0) continue;
219 TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
220 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
222 // Unhandled operand. Halt "fast" selection and bail.
227 // N = N + Idx * ElementSize;
228 uint64_t ElementSize = TD.getABITypeSize(Ty);
229 unsigned IdxN = getRegForValue(Idx);
231 // Unhandled operand. Halt "fast" selection and bail.
234 // If the index is smaller or larger than intptr_t, truncate or extend
236 MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
237 if (IdxVT.bitsLT(VT))
238 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
239 else if (IdxVT.bitsGT(VT))
240 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
242 // Unhandled operand. Halt "fast" selection and bail.
245 if (ElementSize != 1) {
246 IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
248 // Unhandled operand. Halt "fast" selection and bail.
251 N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
253 // Unhandled operand. Halt "fast" selection and bail.
258 // We successfully emitted code for the given LLVM Instruction.
259 UpdateValueMap(I, N);
263 bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
264 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
265 MVT DstVT = TLI.getValueType(I->getType());
267 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
268 DstVT == MVT::Other || !DstVT.isSimple() ||
269 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
270 // Unhandled type. Halt "fast" selection and bail.
273 unsigned InputReg = getRegForValue(I->getOperand(0));
275 // Unhandled operand. Halt "fast" selection and bail.
278 unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
285 UpdateValueMap(I, ResultReg);
289 bool FastISel::SelectBitCast(User *I) {
290 // If the bitcast doesn't change the type, just use the operand value.
291 if (I->getType() == I->getOperand(0)->getType()) {
292 unsigned Reg = getRegForValue(I->getOperand(0));
295 UpdateValueMap(I, Reg);
299 // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
300 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
301 MVT DstVT = TLI.getValueType(I->getType());
303 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
304 DstVT == MVT::Other || !DstVT.isSimple() ||
305 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
306 // Unhandled type. Halt "fast" selection and bail.
309 unsigned Op0 = getRegForValue(I->getOperand(0));
311 // Unhandled operand. Halt "fast" selection and bail.
314 // First, try to perform the bitcast by inserting a reg-reg copy.
315 unsigned ResultReg = 0;
316 if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
317 TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
318 TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
319 ResultReg = createResultReg(DstClass);
321 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
322 Op0, DstClass, SrcClass);
327 // If the reg-reg copy failed, select a BIT_CONVERT opcode.
329 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
330 ISD::BIT_CONVERT, Op0);
335 UpdateValueMap(I, ResultReg);
340 FastISel::SelectInstruction(Instruction *I) {
341 return SelectOperator(I, I->getOpcode());
345 FastISel::SelectOperator(User *I, unsigned Opcode) {
347 case Instruction::Add: {
348 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
349 return SelectBinaryOp(I, Opc);
351 case Instruction::Sub: {
352 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
353 return SelectBinaryOp(I, Opc);
355 case Instruction::Mul: {
356 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
357 return SelectBinaryOp(I, Opc);
359 case Instruction::SDiv:
360 return SelectBinaryOp(I, ISD::SDIV);
361 case Instruction::UDiv:
362 return SelectBinaryOp(I, ISD::UDIV);
363 case Instruction::FDiv:
364 return SelectBinaryOp(I, ISD::FDIV);
365 case Instruction::SRem:
366 return SelectBinaryOp(I, ISD::SREM);
367 case Instruction::URem:
368 return SelectBinaryOp(I, ISD::UREM);
369 case Instruction::FRem:
370 return SelectBinaryOp(I, ISD::FREM);
371 case Instruction::Shl:
372 return SelectBinaryOp(I, ISD::SHL);
373 case Instruction::LShr:
374 return SelectBinaryOp(I, ISD::SRL);
375 case Instruction::AShr:
376 return SelectBinaryOp(I, ISD::SRA);
377 case Instruction::And:
378 return SelectBinaryOp(I, ISD::AND);
379 case Instruction::Or:
380 return SelectBinaryOp(I, ISD::OR);
381 case Instruction::Xor:
382 return SelectBinaryOp(I, ISD::XOR);
384 case Instruction::GetElementPtr:
385 return SelectGetElementPtr(I);
387 case Instruction::Br: {
388 BranchInst *BI = cast<BranchInst>(I);
390 if (BI->isUnconditional()) {
391 MachineFunction::iterator NextMBB =
392 next(MachineFunction::iterator(MBB));
393 BasicBlock *LLVMSucc = BI->getSuccessor(0);
394 MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
396 if (NextMBB != MF.end() && MSucc == NextMBB) {
397 // The unconditional fall-through case, which needs no instructions.
399 // The unconditional branch case.
400 TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
402 MBB->addSuccessor(MSucc);
406 // Conditional branches are not handed yet.
407 // Halt "fast" selection and bail.
411 case Instruction::Unreachable:
415 case Instruction::PHI:
416 // PHI nodes are already emitted.
419 case Instruction::Alloca:
420 // FunctionLowering has the static-sized case covered.
421 if (StaticAllocaMap.count(cast<AllocaInst>(I)))
424 // Dynamic-sized alloca is not handled yet.
427 case Instruction::BitCast:
428 return SelectBitCast(I);
430 case Instruction::FPToSI:
431 return SelectCast(I, ISD::FP_TO_SINT);
432 case Instruction::ZExt:
433 return SelectCast(I, ISD::ZERO_EXTEND);
434 case Instruction::SExt:
435 return SelectCast(I, ISD::SIGN_EXTEND);
436 case Instruction::Trunc:
437 return SelectCast(I, ISD::TRUNCATE);
438 case Instruction::SIToFP:
439 return SelectCast(I, ISD::SINT_TO_FP);
441 case Instruction::IntToPtr: // Deliberate fall-through.
442 case Instruction::PtrToInt: {
443 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
444 MVT DstVT = TLI.getValueType(I->getType());
445 if (DstVT.bitsGT(SrcVT))
446 return SelectCast(I, ISD::ZERO_EXTEND);
447 if (DstVT.bitsLT(SrcVT))
448 return SelectCast(I, ISD::TRUNCATE);
449 unsigned Reg = getRegForValue(I->getOperand(0));
450 if (Reg == 0) return false;
451 UpdateValueMap(I, Reg);
456 // Unhandled instruction. Halt "fast" selection and bail.
461 FastISel::FastISel(MachineFunction &mf,
462 MachineModuleInfo *mmi,
463 DenseMap<const Value *, unsigned> &vm,
464 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
465 DenseMap<const AllocaInst *, int> &am)
472 MRI(MF.getRegInfo()),
473 MFI(*MF.getFrameInfo()),
474 MCP(*MF.getConstantPool()),
476 TD(*TM.getTargetData()),
477 TII(*TM.getInstrInfo()),
478 TLI(*TM.getTargetLowering()) {
481 FastISel::~FastISel() {}
483 unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
488 unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
489 ISD::NodeType, unsigned /*Op0*/) {
493 unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
494 ISD::NodeType, unsigned /*Op0*/,
499 unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
500 ISD::NodeType, uint64_t /*Imm*/) {
504 unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
505 ISD::NodeType, ConstantFP * /*FPImm*/) {
509 unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
510 ISD::NodeType, unsigned /*Op0*/,
515 unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
516 ISD::NodeType, unsigned /*Op0*/,
517 ConstantFP * /*FPImm*/) {
521 unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
523 unsigned /*Op0*/, unsigned /*Op1*/,
528 /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
529 /// to emit an instruction with an immediate operand using FastEmit_ri.
530 /// If that fails, it materializes the immediate into a register and try
531 /// FastEmit_rr instead.
532 unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
533 unsigned Op0, uint64_t Imm,
534 MVT::SimpleValueType ImmType) {
535 // First check if immediate type is legal. If not, we can't use the ri form.
536 unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
539 unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
540 if (MaterialReg == 0)
542 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
545 /// FastEmit_rf_ - This method is a wrapper of FastEmit_ri. It first tries
546 /// to emit an instruction with a floating-point immediate operand using
547 /// FastEmit_rf. If that fails, it materializes the immediate into a register
548 /// and try FastEmit_rr instead.
549 unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
550 unsigned Op0, ConstantFP *FPImm,
551 MVT::SimpleValueType ImmType) {
552 // First check if immediate type is legal. If not, we can't use the rf form.
553 unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
557 // Materialize the constant in a register.
558 unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
559 if (MaterialReg == 0) {
560 // If the target doesn't have a way to directly enter a floating-point
561 // value into a register, use an alternate approach.
562 // TODO: The current approach only supports floating-point constants
563 // that can be constructed by conversion from integer values. This should
564 // be replaced by code that creates a load from a constant-pool entry,
565 // which will require some target-specific work.
566 const APFloat &Flt = FPImm->getValueAPF();
567 MVT IntVT = TLI.getPointerTy();
570 uint32_t IntBitWidth = IntVT.getSizeInBits();
571 if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
572 APFloat::rmTowardZero) != APFloat::opOK)
574 APInt IntVal(IntBitWidth, 2, x);
576 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
577 ISD::Constant, IntVal.getZExtValue());
580 MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
581 ISD::SINT_TO_FP, IntegerReg);
582 if (MaterialReg == 0)
585 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
588 unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
589 return MRI.createVirtualRegister(RC);
592 unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
593 const TargetRegisterClass* RC) {
594 unsigned ResultReg = createResultReg(RC);
595 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
597 BuildMI(MBB, II, ResultReg);
601 unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
602 const TargetRegisterClass *RC,
604 unsigned ResultReg = createResultReg(RC);
605 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
607 if (II.getNumDefs() >= 1)
608 BuildMI(MBB, II, ResultReg).addReg(Op0);
610 BuildMI(MBB, II).addReg(Op0);
611 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
612 II.ImplicitDefs[0], RC, RC);
620 unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
621 const TargetRegisterClass *RC,
622 unsigned Op0, unsigned Op1) {
623 unsigned ResultReg = createResultReg(RC);
624 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
626 if (II.getNumDefs() >= 1)
627 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
629 BuildMI(MBB, II).addReg(Op0).addReg(Op1);
630 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
631 II.ImplicitDefs[0], RC, RC);
638 unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
639 const TargetRegisterClass *RC,
640 unsigned Op0, uint64_t Imm) {
641 unsigned ResultReg = createResultReg(RC);
642 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
644 if (II.getNumDefs() >= 1)
645 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
647 BuildMI(MBB, II).addReg(Op0).addImm(Imm);
648 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
649 II.ImplicitDefs[0], RC, RC);
656 unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
657 const TargetRegisterClass *RC,
658 unsigned Op0, ConstantFP *FPImm) {
659 unsigned ResultReg = createResultReg(RC);
660 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
662 if (II.getNumDefs() >= 1)
663 BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
665 BuildMI(MBB, II).addReg(Op0).addFPImm(FPImm);
666 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
667 II.ImplicitDefs[0], RC, RC);
674 unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
675 const TargetRegisterClass *RC,
676 unsigned Op0, unsigned Op1, uint64_t Imm) {
677 unsigned ResultReg = createResultReg(RC);
678 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
680 if (II.getNumDefs() >= 1)
681 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
683 BuildMI(MBB, II).addReg(Op0).addReg(Op1).addImm(Imm);
684 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
685 II.ImplicitDefs[0], RC, RC);
692 unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
693 const TargetRegisterClass *RC,
695 unsigned ResultReg = createResultReg(RC);
696 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
698 if (II.getNumDefs() >= 1)
699 BuildMI(MBB, II, ResultReg).addImm(Imm);
701 BuildMI(MBB, II).addImm(Imm);
702 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
703 II.ImplicitDefs[0], RC, RC);
710 unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
711 const TargetRegisterClass* RC = MRI.getRegClass(Op0);
712 const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);
714 unsigned ResultReg = createResultReg(SRC);
715 const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);
717 if (II.getNumDefs() >= 1)
718 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
720 BuildMI(MBB, II).addReg(Op0).addImm(Idx);
721 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
722 II.ImplicitDefs[0], RC, RC);