1 //===-- FastISel.cpp - Implementation of the FastISel class --------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the implementation of the FastISel class.
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations (e.g. calls) are not
15 // supported. It is also not intended to be able to do much optimization,
16 // except in a few cases where doing optimizations reduces overall compile
17 // time (e.g. folding constants into immediate fields, because it's cheap
18 // and it reduces the number of instructions later phases have to examine).
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups in -O0 compiles.
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
40 //===----------------------------------------------------------------------===//
42 #include "llvm/Function.h"
43 #include "llvm/GlobalVariable.h"
44 #include "llvm/Instructions.h"
45 #include "llvm/IntrinsicInst.h"
46 #include "llvm/CodeGen/FastISel.h"
47 #include "llvm/CodeGen/MachineInstrBuilder.h"
48 #include "llvm/CodeGen/MachineModuleInfo.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/Target/TargetData.h"
51 #include "llvm/Target/TargetInstrInfo.h"
52 #include "llvm/Target/TargetLowering.h"
53 #include "llvm/Target/TargetMachine.h"
56 unsigned FastISel::getRegForValue(Value *V) {
57 // Look up the value to see if we already have a register for it. We
58 // cache values defined by Instructions across blocks, and other values
59 // only locally. This is because Instructions already have the SSA
60 // def-dominatess-use requirement enforced.
61 if (ValueMap.count(V))
63 unsigned Reg = LocalValueMap[V];
67 MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT();
69 // Ignore illegal types.
70 if (!TLI.isTypeLegal(VT)) {
71 // Promote MVT::i1 to a legal type though, because it's common and easy.
73 VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
78 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
79 if (CI->getValue().getActiveBits() <= 64)
80 Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
81 } else if (isa<AllocaInst>(V)) {
82 Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
83 } else if (isa<ConstantPointerNull>(V)) {
84 Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
85 } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
86 Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
89 const APFloat &Flt = CF->getValueAPF();
90 MVT IntVT = TLI.getPointerTy();
93 uint32_t IntBitWidth = IntVT.getSizeInBits();
94 if (!Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
95 APFloat::rmTowardZero) != APFloat::opOK) {
96 APInt IntVal(IntBitWidth, 2, x);
98 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
99 ISD::Constant, IntVal.getZExtValue());
101 Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
104 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
105 if (!SelectOperator(CE, CE->getOpcode())) return 0;
106 Reg = LocalValueMap[CE];
107 } else if (isa<UndefValue>(V)) {
108 Reg = createResultReg(TLI.getRegClassFor(VT));
109 BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
112 // If target-independent code couldn't handle the value, give target-specific
114 if (!Reg && isa<Constant>(V))
115 Reg = TargetMaterializeConstant(cast<Constant>(V));
117 // Don't cache constant materializations in the general ValueMap.
118 // To do so would require tracking what uses they dominate.
120 LocalValueMap[V] = Reg;
124 unsigned FastISel::lookUpRegForValue(Value *V) {
125 // Look up the value to see if we already have a register for it. We
126 // cache values defined by Instructions across blocks, and other values
127 // only locally. This is because Instructions already have the SSA
128 // def-dominatess-use requirement enforced.
129 if (ValueMap.count(V))
131 return LocalValueMap[V];
134 /// UpdateValueMap - Update the value map to include the new mapping for this
135 /// instruction, or insert an extra copy to get the result in a previously
136 /// determined register.
137 /// NOTE: This is only necessary because we might select a block that uses
138 /// a value before we select the block that defines the value. It might be
139 /// possible to fix this by selecting blocks in reverse postorder.
//
// NOTE(review): lines are missing from this listing -- the `return;` after
// the LocalValueMap assignment, the `ValueMap[I] = Reg;` then-branch with
// its `else`, and the closing brace. Confirm against the upstream file.
140 void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
// Non-Instruction values (constants etc.) are cached per-block only.
141 if (!isa<Instruction>(I)) {
142 LocalValueMap[I] = Reg;
145 if (!ValueMap.count(I))
// A register was already assigned (the value was used before its defining
// block was selected); copy the new result into that register.
148 TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
149 Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
152 /// SelectBinaryOp - Select and emit code for a binary operator instruction,
153 /// which has an opcode which directly corresponds to the given ISD opcode.
/// Returns true on success; false tells the caller to bail to SelectionDAG.
//
// NOTE(review): this listing is missing the `return false;` bail-out lines
// after each failed check, the `if (VT == MVT::i1 &&` guard opening the i1
// special case, its `else return false;`, the per-branch `return true;`
// lines, and closing braces. Confirm against the upstream file.
155 bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
156 MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
157 if (VT == MVT::Other || !VT.isSimple())
158 // Unhandled type. Halt "fast" selection and bail.
161 // We only handle legal types. For example, on x86-32 the instruction
162 // selector contains all of the 64-bit instructions from x86-64,
163 // under the assumption that i64 won't be used if the target doesn't support it.
165 if (!TLI.isTypeLegal(VT)) {
166 // MVT::i1 is special. Allow AND, OR, or XOR because they
167 // don't require additional zeroing, which makes them easy.
169 (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
170 ISDOpcode == ISD::XOR))
171 VT = TLI.getTypeToTransformTo(VT);
176 unsigned Op0 = getRegForValue(I->getOperand(0));
178 // Unhandled operand. Halt "fast" selection and bail.
// Prefer the reg-imm form when the RHS is an integer constant.
181 // Check if the second operand is a constant and handle it appropriately.
182 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
183 unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
184 ISDOpcode, Op0, CI->getZExtValue());
185 if (ResultReg != 0) {
186 // We successfully emitted code for the given LLVM Instruction.
187 UpdateValueMap(I, ResultReg);
// Likewise prefer the reg-fpimm form for a constant-FP RHS.
192 // Check if the second operand is a constant float.
193 if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
194 unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
196 if (ResultReg != 0) {
197 // We successfully emitted code for the given LLVM Instruction.
198 UpdateValueMap(I, ResultReg);
203 unsigned Op1 = getRegForValue(I->getOperand(1));
205 // Unhandled operand. Halt "fast" selection and bail.
208 // Now we have both operands in registers. Emit the instruction.
209 unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
210 ISDOpcode, Op0, Op1);
212 // Target-specific code wasn't able to find a machine opcode for
213 // the given ISD opcode and type. Halt "fast" selection and bail.
216 // We successfully emitted code for the given LLVM Instruction.
217 UpdateValueMap(I, ResultReg);
/// SelectGetElementPtr - Lower a GEP by walking its indices and folding
/// each struct-field offset or scaled array index into the running address
/// register N with ADD/MUL emitted via the FastEmit helpers.
//
// NOTE(review): this listing is missing lines -- the loop's `OI != E; ++OI)`
// header tail and the `Value *Idx = *OI;` declaration (Idx is used below
// without a visible definition), the `uint64_t Offs =` left-hand side of
// the expression-statement near the bottom, the `if (N == 0) return false;`
// bail-outs, and the `return true;`/closing braces. Confirm upstream.
221 bool FastISel::SelectGetElementPtr(User *I) {
222 unsigned N = getRegForValue(I->getOperand(0));
224 // Unhandled operand. Halt "fast" selection and bail.
227 const Type *Ty = I->getOperand(0)->getType();
// All address arithmetic is done in the pointer-sized integer type.
228 MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
229 for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
// Struct indices are constant field numbers: add the precomputed field
// offset from the struct layout.
232 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
233 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
236 uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
237 // FIXME: This can be optimized by combining the add with a
239 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
241 // Unhandled operand. Halt "fast" selection and bail.
244 Ty = StTy->getElementType(Field);
// Array/pointer/vector index: scale by the element size.
246 Ty = cast<SequentialType>(Ty)->getElementType();
248 // If this is a constant subscript, handle it quickly.
249 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
250 if (CI->getZExtValue() == 0) continue;
// NOTE(review): this expression discards its result as written; it should
// initialize `uint64_t Offs =` (the lhs line is missing from the listing).
252 TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
253 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
255 // Unhandled operand. Halt "fast" selection and bail.
260 // N = N + Idx * ElementSize;
261 uint64_t ElementSize = TD.getABITypeSize(Ty);
262 unsigned IdxN = getRegForValue(Idx);
264 // Unhandled operand. Halt "fast" selection and bail.
267 // If the index is smaller or larger than intptr_t, truncate or extend
// it to match the pointer width before scaling.
269 MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
270 if (IdxVT.bitsLT(VT))
271 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
272 else if (IdxVT.bitsGT(VT))
273 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
275 // Unhandled operand. Halt "fast" selection and bail.
// Skip the multiply when scaling by 1.
278 if (ElementSize != 1) {
279 IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
281 // Unhandled operand. Halt "fast" selection and bail.
284 N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
286 // Unhandled operand. Halt "fast" selection and bail.
291 // We successfully emitted code for the given LLVM Instruction.
292 UpdateValueMap(I, N);
/// SelectCall - Select a call instruction. Only a handful of debug-info
/// intrinsics are handled here; everything else returns false so the
/// SelectionDAG path takes over.
//
// NOTE(review): this listing is missing the `switch (IID) { default: break; }`
// scaffolding around the cases, the `return true;` at the end of each case,
// the `int FI = SI->second;` definition (FI is used below without a visible
// declaration), and the final `return false;`. Confirm against upstream.
296 bool FastISel::SelectCall(User *I) {
297 Function *F = cast<CallInst>(I)->getCalledFunction();
298 if (!F) return false;
300 unsigned IID = F->getIntrinsicID();
303 case Intrinsic::dbg_stoppoint: {
304 DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
// Only emit the label when debug info is present and verifiable.
305 if (MMI && SPI->getContext() && MMI->Verify(SPI->getContext())) {
306 DebugInfoDesc *DD = MMI->getDescFor(SPI->getContext());
307 assert(DD && "Not a debug information descriptor");
308 const CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
309 unsigned SrcFile = MMI->RecordSource(CompileUnit);
310 unsigned Line = SPI->getLine();
311 unsigned Col = SPI->getColumn();
312 unsigned ID = MMI->RecordSourceLine(Line, Col, SrcFile);
313 const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
314 BuildMI(MBB, II).addImm(ID);
317 case Intrinsic::dbg_region_start: {
319 DbgRegionStartInst *RSI = cast<DbgRegionStartInst>(I);
320 if (MMI && RSI->getContext() && MMI->Verify(RSI->getContext())) {
321 unsigned ID = MMI->RecordRegionStart(RSI->getContext());
322 const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
323 BuildMI(MBB, II).addImm(ID);
327 case Intrinsic::dbg_region_end: {
328 DbgRegionEndInst *REI = cast<DbgRegionEndInst>(I);
329 if (MMI && REI->getContext() && MMI->Verify(REI->getContext())) {
330 unsigned ID = MMI->RecordRegionEnd(REI->getContext());
331 const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
332 BuildMI(MBB, II).addImm(ID);
336 case Intrinsic::dbg_func_start: {
337 if (!MMI) return true;
338 DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
339 Value *SP = FSI->getSubprogram();
340 if (SP && MMI->Verify(SP)) {
341 // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is
342 // what (most?) gdb expects.
343 DebugInfoDesc *DD = MMI->getDescFor(SP);
344 assert(DD && "Not a debug information descriptor");
345 SubprogramDesc *Subprogram = cast<SubprogramDesc>(DD);
346 const CompileUnitDesc *CompileUnit = Subprogram->getFile();
347 unsigned SrcFile = MMI->RecordSource(CompileUnit);
348 // Record the source line but does not create a label. It will be emitted
349 // at asm emission time.
350 MMI->RecordSourceLine(Subprogram->getLine(), 0, SrcFile);
354 case Intrinsic::dbg_declare: {
355 DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
356 Value *Variable = DI->getVariable();
357 if (MMI && Variable && MMI->Verify(Variable)) {
358 // Determine the address of the declared object.
359 Value *Address = DI->getAddress();
// Look through a bitcast to find the underlying alloca.
360 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
361 Address = BCI->getOperand(0);
362 AllocaInst *AI = dyn_cast<AllocaInst>(Address);
363 // Don't handle byval struct arguments, for example.
365 DenseMap<const AllocaInst*, int>::iterator SI =
366 StaticAllocaMap.find(AI);
367 assert(SI != StaticAllocaMap.end() && "Invalid dbg.declare!");
370 // Determine the debug globalvariable.
371 GlobalValue *GV = cast<GlobalVariable>(Variable);
373 // Build the DECLARE instruction.
374 const TargetInstrDesc &II = TII.get(TargetInstrInfo::DECLARE);
375 BuildMI(MBB, II).addFrameIndex(FI).addGlobalAddress(GV);
/// SelectCast - Select a cast instruction (extend/truncate/FP conversion)
/// whose ISD opcode is supplied by the caller. Returns true on success.
//
// NOTE(review): missing from this listing: the `return false;` lines after
// the type and operand checks, the tail of the FastEmit_r call
// (DstVT/Opcode/InputReg arguments and its `if (!ResultReg) return false;`
// check), and the final `return true;` with closing brace. Confirm upstream.
383 bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
384 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
385 MVT DstVT = TLI.getValueType(I->getType());
// Both sides must be simple, legal types for the fast path.
387 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
388 DstVT == MVT::Other || !DstVT.isSimple() ||
389 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
390 // Unhandled type. Halt "fast" selection and bail.
393 unsigned InputReg = getRegForValue(I->getOperand(0));
395 // Unhandled operand. Halt "fast" selection and bail.
398 unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
405 UpdateValueMap(I, ResultReg);
/// SelectBitCast - Select a bitcast: reuse the operand register when the
/// type is unchanged, otherwise try a reg-reg copy and fall back to a
/// BIT_CONVERT node.
//
// NOTE(review): missing from this listing: the `if (Reg == 0)`/`if (Op0 == 0)`
// `return false;` bail-outs, the `if (InsertedCopy) ... else ResultReg = 0;`
// handling after copyRegToReg, the `if (!ResultReg)` guard before the
// BIT_CONVERT fallback, and the final `return true;`. Confirm upstream.
409 bool FastISel::SelectBitCast(User *I) {
410 // If the bitcast doesn't change the type, just use the operand value.
411 if (I->getType() == I->getOperand(0)->getType()) {
412 unsigned Reg = getRegForValue(I->getOperand(0));
415 UpdateValueMap(I, Reg);
419 // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
420 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
421 MVT DstVT = TLI.getValueType(I->getType());
423 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
424 DstVT == MVT::Other || !DstVT.isSimple() ||
425 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
426 // Unhandled type. Halt "fast" selection and bail.
429 unsigned Op0 = getRegForValue(I->getOperand(0));
431 // Unhandled operand. Halt "fast" selection and bail.
434 // First, try to perform the bitcast by inserting a reg-reg copy.
435 unsigned ResultReg = 0;
436 if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
437 TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
438 TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
439 ResultReg = createResultReg(DstClass);
441 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
442 Op0, DstClass, SrcClass);
447 // If the reg-reg copy failed, select a BIT_CONVERT opcode.
449 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
450 ISD::BIT_CONVERT, Op0);
455 UpdateValueMap(I, ResultReg);
460 FastISel::SelectInstruction(Instruction *I) {
461 return SelectOperator(I, I->getOpcode());
/// SelectOperator - Dispatch on an IR opcode and select the operation via
/// the per-kind helpers. Shared by SelectInstruction (for Instructions)
/// and getRegForValue (for ConstantExprs). Returns false for anything the
/// fast path cannot handle.
//
// NOTE(review): this listing is missing the `bool` return type line, the
// `switch (Opcode) {` opener, per-case `return true;`/closing braces (e.g.
// fall-through branch, Unreachable, PHI, Alloca, IntToPtr/PtrToInt), the
// `default:` label, and the final `return false;`/`}`. Confirm upstream.
465 FastISel::SelectOperator(User *I, unsigned Opcode) {
// Arithmetic opcodes pick the FP or integer ISD node based on the type.
467 case Instruction::Add: {
468 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
469 return SelectBinaryOp(I, Opc);
471 case Instruction::Sub: {
472 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
473 return SelectBinaryOp(I, Opc);
475 case Instruction::Mul: {
476 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
477 return SelectBinaryOp(I, Opc);
479 case Instruction::SDiv:
480 return SelectBinaryOp(I, ISD::SDIV);
481 case Instruction::UDiv:
482 return SelectBinaryOp(I, ISD::UDIV);
483 case Instruction::FDiv:
484 return SelectBinaryOp(I, ISD::FDIV);
485 case Instruction::SRem:
486 return SelectBinaryOp(I, ISD::SREM);
487 case Instruction::URem:
488 return SelectBinaryOp(I, ISD::UREM);
489 case Instruction::FRem:
490 return SelectBinaryOp(I, ISD::FREM);
491 case Instruction::Shl:
492 return SelectBinaryOp(I, ISD::SHL);
493 case Instruction::LShr:
494 return SelectBinaryOp(I, ISD::SRL);
495 case Instruction::AShr:
496 return SelectBinaryOp(I, ISD::SRA);
497 case Instruction::And:
498 return SelectBinaryOp(I, ISD::AND);
499 case Instruction::Or:
500 return SelectBinaryOp(I, ISD::OR);
501 case Instruction::Xor:
502 return SelectBinaryOp(I, ISD::XOR);
504 case Instruction::GetElementPtr:
505 return SelectGetElementPtr(I);
507 case Instruction::Br: {
508 BranchInst *BI = cast<BranchInst>(I);
510 if (BI->isUnconditional()) {
// Emit nothing for a fall-through; otherwise emit an explicit branch.
511 MachineFunction::iterator NextMBB =
512 next(MachineFunction::iterator(MBB));
513 BasicBlock *LLVMSucc = BI->getSuccessor(0);
514 MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
516 if (NextMBB != MF.end() && MSucc == NextMBB) {
517 // The unconditional fall-through case, which needs no instructions.
519 // The unconditional branch case.
520 TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
522 MBB->addSuccessor(MSucc);
526 // Conditional branches are not handled yet.
527 // Halt "fast" selection and bail.
531 case Instruction::Unreachable:
// Nothing to codegen here.
535 case Instruction::PHI:
536 // PHI nodes are already emitted.
539 case Instruction::Alloca:
540 // FunctionLowering has the static-sized case covered.
541 if (StaticAllocaMap.count(cast<AllocaInst>(I)))
544 // Dynamic-sized alloca is not handled yet.
547 case Instruction::Call:
548 return SelectCall(I);
550 case Instruction::BitCast:
551 return SelectBitCast(I);
553 case Instruction::FPToSI:
554 return SelectCast(I, ISD::FP_TO_SINT);
555 case Instruction::ZExt:
556 return SelectCast(I, ISD::ZERO_EXTEND);
557 case Instruction::SExt:
558 return SelectCast(I, ISD::SIGN_EXTEND);
559 case Instruction::Trunc:
560 return SelectCast(I, ISD::TRUNCATE);
561 case Instruction::SIToFP:
562 return SelectCast(I, ISD::SINT_TO_FP);
// Pointer/integer casts reduce to extend, truncate, or a no-op reuse of
// the operand's register when the widths match.
564 case Instruction::IntToPtr: // Deliberate fall-through.
565 case Instruction::PtrToInt: {
566 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
567 MVT DstVT = TLI.getValueType(I->getType());
568 if (DstVT.bitsGT(SrcVT))
569 return SelectCast(I, ISD::ZERO_EXTEND);
570 if (DstVT.bitsLT(SrcVT))
571 return SelectCast(I, ISD::TRUNCATE);
572 unsigned Reg = getRegForValue(I->getOperand(0));
573 if (Reg == 0) return false;
574 UpdateValueMap(I, Reg);
579 // Unhandled instruction. Halt "fast" selection and bail.
// FastISel constructor: caches references to the function's machine-level
// state (register info, frame info, constant pool) and the target's
// data layout, instruction info, and lowering info.
//
// NOTE(review): the initializer list is truncated in this listing -- the
// leading `: MBB(0), ValueMap(vm), MBBMap(bm), StaticAllocaMap(am),
// MF(mf), MMI(mmi),` entries, the `TM(MF.getTarget()),` entry that `TD`,
// `TII` and `TLI` below depend on, and the empty body's closing brace are
// all missing. Confirm against the upstream file.
584 FastISel::FastISel(MachineFunction &mf,
585 MachineModuleInfo *mmi,
586 DenseMap<const Value *, unsigned> &vm,
587 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
588 DenseMap<const AllocaInst *, int> &am)
595 MRI(MF.getRegInfo()),
596 MFI(*MF.getFrameInfo()),
597 MCP(*MF.getConstantPool()),
599 TD(*TM.getTargetData()),
600 TII(*TM.getInstrInfo()),
601 TLI(*TM.getTargetLowering()) {
604 FastISel::~FastISel() {}
// Default implementations of the FastEmit_* hooks. Targets override the
// forms they support; these base versions exist so unsupported forms fail
// gracefully.
//
// NOTE(review): each stub's `return 0;` body line and closing brace, and
// some trailing parameter lines, are missing from this listing. Confirm
// against the upstream file.
606 unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
611 unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
612 ISD::NodeType, unsigned /*Op0*/) {
616 unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
617 ISD::NodeType, unsigned /*Op0*/,
622 unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
623 ISD::NodeType, uint64_t /*Imm*/) {
627 unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
628 ISD::NodeType, ConstantFP * /*FPImm*/) {
632 unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
633 ISD::NodeType, unsigned /*Op0*/,
638 unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
639 ISD::NodeType, unsigned /*Op0*/,
640 ConstantFP * /*FPImm*/) {
644 unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
646 unsigned /*Op0*/, unsigned /*Op1*/,
651 /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
652 /// to emit an instruction with an immediate operand using FastEmit_ri.
653 /// If that fails, it materializes the immediate into a register and tries
654 /// FastEmit_rr instead.
//
// NOTE(review): missing from this listing: the `if (ResultReg != 0)
// return ResultReg;` early-out after the FastEmit_ri call, the `return 0;`
// when materialization fails, and the closing brace. Confirm upstream.
655 unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
656 unsigned Op0, uint64_t Imm,
657 MVT::SimpleValueType ImmType) {
658 // First check if immediate type is legal. If not, we can't use the ri form.
659 unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
// Fallback: put the immediate into a register and use the rr form.
662 unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
663 if (MaterialReg == 0)
665 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
668 /// FastEmit_rf_ - This method is a wrapper of FastEmit_ri. It first tries
669 /// to emit an instruction with a floating-point immediate operand using
670 /// FastEmit_rf. If that fails, it materializes the immediate into a register
671 /// and tries FastEmit_rr instead.
//
// NOTE(review): missing from this listing: the `if (ResultReg != 0)
// return ResultReg;` early-out, the `uint64_t x[2];` conversion buffer
// declaration (`x` is used below without a visible definition), the
// `return 0;` lines after the failed conversion / IntegerReg checks, and
// the closing brace. Confirm against the upstream file.
672 unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
673 unsigned Op0, ConstantFP *FPImm,
674 MVT::SimpleValueType ImmType) {
675 // First check if immediate type is legal. If not, we can't use the rf form.
676 unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
680 // Materialize the constant in a register.
681 unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
682 if (MaterialReg == 0) {
683 // If the target doesn't have a way to directly enter a floating-point
684 // value into a register, use an alternate approach.
685 // TODO: The current approach only supports floating-point constants
686 // that can be constructed by conversion from integer values. This should
687 // be replaced by code that creates a load from a constant-pool entry,
688 // which will require some target-specific work.
689 const APFloat &Flt = FPImm->getValueAPF();
690 MVT IntVT = TLI.getPointerTy();
693 uint32_t IntBitWidth = IntVT.getSizeInBits();
// Bail out unless the FP value converts to an integer exactly.
694 if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
695 APFloat::rmTowardZero) != APFloat::opOK)
697 APInt IntVal(IntBitWidth, 2, x);
699 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
700 ISD::Constant, IntVal.getZExtValue());
703 MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
704 ISD::SINT_TO_FP, IntegerReg);
705 if (MaterialReg == 0)
708 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
711 unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
712 return MRI.createVirtualRegister(RC);
715 unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
716 const TargetRegisterClass* RC) {
717 unsigned ResultReg = createResultReg(RC);
718 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
720 BuildMI(MBB, II, ResultReg);
// FastEmitInst_r - Emit an instruction with one register operand. If the
// instruction has no explicit def, copy its implicit def into ResultReg.
//
// NOTE(review): missing from this listing: the `unsigned Op0) {` line that
// closes the parameter list (Op0 is used below without a visible
// declaration), the `else {` around the implicit-def path, the
// `if (!InsertedCopy) ResultReg = 0;` check, `return ResultReg;`, and the
// closing braces. Confirm against the upstream file.
724 unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
725 const TargetRegisterClass *RC,
727 unsigned ResultReg = createResultReg(RC);
728 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
730 if (II.getNumDefs() >= 1)
731 BuildMI(MBB, II, ResultReg).addReg(Op0);
733 BuildMI(MBB, II).addReg(Op0);
734 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
735 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_rr - Emit an instruction with two register operands. If the
// instruction has no explicit def, copy its implicit def into ResultReg.
//
// NOTE(review): missing from this listing: the `else {` around the
// implicit-def path, the `if (!InsertedCopy) ResultReg = 0;` check,
// `return ResultReg;`, and closing braces. Confirm upstream.
743 unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
744 const TargetRegisterClass *RC,
745 unsigned Op0, unsigned Op1) {
746 unsigned ResultReg = createResultReg(RC);
747 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
749 if (II.getNumDefs() >= 1)
750 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
752 BuildMI(MBB, II).addReg(Op0).addReg(Op1);
753 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
754 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_ri - Emit an instruction with a register and an immediate
// operand. If the instruction has no explicit def, copy its implicit def
// into ResultReg.
//
// NOTE(review): missing from this listing: the `else {` around the
// implicit-def path, the `if (!InsertedCopy) ResultReg = 0;` check,
// `return ResultReg;`, and closing braces. Confirm upstream.
761 unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
762 const TargetRegisterClass *RC,
763 unsigned Op0, uint64_t Imm) {
764 unsigned ResultReg = createResultReg(RC);
765 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
767 if (II.getNumDefs() >= 1)
768 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
770 BuildMI(MBB, II).addReg(Op0).addImm(Imm);
771 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
772 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_rf - Emit an instruction with a register and an FP-immediate
// operand. If the instruction has no explicit def, copy its implicit def
// into ResultReg.
//
// NOTE(review): missing from this listing: the `else {` around the
// implicit-def path, the `if (!InsertedCopy) ResultReg = 0;` check,
// `return ResultReg;`, and closing braces. Confirm upstream.
779 unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
780 const TargetRegisterClass *RC,
781 unsigned Op0, ConstantFP *FPImm) {
782 unsigned ResultReg = createResultReg(RC);
783 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
785 if (II.getNumDefs() >= 1)
786 BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
788 BuildMI(MBB, II).addReg(Op0).addFPImm(FPImm);
789 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
790 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_rri - Emit an instruction with two register operands and an
// immediate. If the instruction has no explicit def, copy its implicit def
// into ResultReg.
//
// NOTE(review): missing from this listing: the `else {` around the
// implicit-def path, the `if (!InsertedCopy) ResultReg = 0;` check,
// `return ResultReg;`, and closing braces. Confirm upstream.
797 unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
798 const TargetRegisterClass *RC,
799 unsigned Op0, unsigned Op1, uint64_t Imm) {
800 unsigned ResultReg = createResultReg(RC);
801 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
803 if (II.getNumDefs() >= 1)
804 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
806 BuildMI(MBB, II).addReg(Op0).addReg(Op1).addImm(Imm);
807 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
808 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_i - Emit an instruction with a single immediate operand. If
// the instruction has no explicit def, copy its implicit def into ResultReg.
//
// NOTE(review): missing from this listing: the `uint64_t Imm) {` line that
// closes the parameter list (Imm is used below without a visible
// declaration), the `else {` around the implicit-def path, the
// `if (!InsertedCopy) ResultReg = 0;` check, `return ResultReg;`, and
// closing braces. Confirm against the upstream file.
815 unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
816 const TargetRegisterClass *RC,
818 unsigned ResultReg = createResultReg(RC);
819 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
821 if (II.getNumDefs() >= 1)
822 BuildMI(MBB, II, ResultReg).addImm(Imm);
824 BuildMI(MBB, II).addImm(Imm);
825 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
826 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_extractsubreg - Emit an EXTRACT_SUBREG taking sub-register
// Idx of Op0; the result register class is looked up from Op0's class.
//
// NOTE(review): this definition is truncated at the end of the file -- the
// tail of the implicit-def copy path and the `return ResultReg;`/closing
// brace are not present in this listing. Note also that `subregclasses_begin()
// + Idx - 1` assumes sub-register indices here are 1-based. Confirm both
// against the upstream file.
833 unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
834 const TargetRegisterClass* RC = MRI.getRegClass(Op0);
835 const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);
837 unsigned ResultReg = createResultReg(SRC);
838 const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);
840 if (II.getNumDefs() >= 1)
841 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
843 BuildMI(MBB, II).addReg(Op0).addImm(Idx);
844 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
845 II.ImplicitDefs[0], RC, RC);