1 ///===-- FastISel.cpp - Implementation of the FastISel class --------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the implementation of the FastISel class.
12 // "Fast" instruction selection is designed to emit very poor code quickly.
13 // Also, it is not designed to be able to do much lowering, so most illegal
14 // types (e.g. i64 on 32-bit targets) and operations (e.g. calls) are not
15 // supported. It is also not intended to be able to do much optimization,
16 // except in a few cases where doing optimizations reduces overall compile
17 // time (e.g. folding constants into immediate fields, because it's cheap
18 // and it reduces the number of instructions later phases have to examine).
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
40 //===----------------------------------------------------------------------===//
42 #include "llvm/Function.h"
43 #include "llvm/GlobalVariable.h"
44 #include "llvm/Instructions.h"
45 #include "llvm/IntrinsicInst.h"
46 #include "llvm/CodeGen/FastISel.h"
47 #include "llvm/CodeGen/MachineInstrBuilder.h"
48 #include "llvm/CodeGen/MachineModuleInfo.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/Target/TargetData.h"
51 #include "llvm/Target/TargetInstrInfo.h"
52 #include "llvm/Target/TargetLowering.h"
53 #include "llvm/Target/TargetMachine.h"
// getRegForValue - Return the virtual register that holds V, materializing
// constants, static allocas, and undef values on demand. Returns 0 when the
// value cannot be handled by fast instruction selection.
// NOTE(review): the embedded original line numbering skips in several places
// (e.g. 62, 64-66, 72, 74-77, 87-88, 91-92, 97, 100, 102-103, 110-111, 113,
// 116, 119, 121-123); statements such as the early return on a ValueMap hit
// and the final return appear to be missing — restore from upstream.
56 unsigned FastISel::getRegForValue(Value *V) {
57 // Look up the value to see if we already have a register for it. We
58 // cache values defined by Instructions across blocks, and other values
59 // only locally. This is because Instructions already have the SSA
60 // def-dominates-use requirement enforced.
61 if (ValueMap.count(V))
63 unsigned Reg = LocalValueMap[V];
67 MVT::SimpleValueType VT = TLI.getValueType(V->getType()).getSimpleVT();
69 // Ignore illegal types.
70 if (!TLI.isTypeLegal(VT)) {
71 // Promote MVT::i1 to a legal type though, because it's common and easy.
73 VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
// Materialize the value depending on its kind.
78 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
79 if (CI->getValue().getActiveBits() <= 64)
80 Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
81 } else if (isa<AllocaInst>(V)) {
82 Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
83 } else if (isa<ConstantPointerNull>(V)) {
// Null pointers are just the constant 0 of pointer width.
84 Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
85 } else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
86 Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
// Fallback: if the target cannot materialize the FP immediate directly,
// try converting an equivalent integer constant with SINT_TO_FP.
89 const APFloat &Flt = CF->getValueAPF();
90 MVT IntVT = TLI.getPointerTy();
93 uint32_t IntBitWidth = IntVT.getSizeInBits();
// NOTE(review): '!' binds to the call result before the '!=' compare.
// Since APFloat::opOK is the zero enumerator this happens to test for an
// exact conversion, but it reads wrong — confirm intent against upstream
// (compare with the plain '!= APFloat::opOK' test in FastEmit_rf_ below).
94 if (!Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
95 APFloat::rmTowardZero) != APFloat::opOK) {
96 APInt IntVal(IntBitWidth, 2, x);
98 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
99 ISD::Constant, IntVal.getZExtValue());
101 Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
104 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
// Constant expressions are selected like ordinary operators; their
// result register lands in LocalValueMap.
105 if (!SelectOperator(CE, CE->getOpcode())) return 0;
106 Reg = LocalValueMap[CE];
107 } else if (isa<UndefValue>(V)) {
// Undef just needs some register; IMPLICIT_DEF provides one.
108 Reg = createResultReg(TLI.getRegClassFor(VT));
109 BuildMI(MBB, TII.get(TargetInstrInfo::IMPLICIT_DEF), Reg);
112 // If target-independent code couldn't handle the value, give target-specific
// code a chance via the TargetMaterializeConstant hook.
114 if (!Reg && isa<Constant>(V))
115 Reg = TargetMaterializeConstant(cast<Constant>(V));
117 // Don't cache constant materializations in the general ValueMap.
118 // To do so would require tracking what uses they dominate.
120 LocalValueMap[V] = Reg;
// lookUpRegForValue - Like getRegForValue, but purely a lookup: never
// materializes anything. Returns 0 if no register has been assigned yet.
// NOTE(review): original line 130 (presumably 'return ValueMap[V];' for the
// cross-block cache hit) is missing from this listing — verify upstream.
124 unsigned FastISel::lookUpRegForValue(Value *V) {
125 // Look up the value to see if we already have a register for it. We
126 // cache values defined by Instructions across blocks, and other values
127 // only locally. This is because Instructions already have the SSA
128 // def-dominates-use requirement enforced.
129 if (ValueMap.count(V))
131 return LocalValueMap[V];
134 /// UpdateValueMap - Update the value map to include the new mapping for this
135 /// instruction, or insert an extra copy to get the result in a previous
136 /// determined register.
137 /// NOTE: This is only necessary because we might select a block that uses
138 /// a value before we select the block that defines the value. It might be
139 /// possible to fix this by selecting blocks in reverse postorder.
140 void FastISel::UpdateValueMap(Value* I, unsigned Reg) {
// Non-instruction values (constants etc.) are only cached locally.
141 if (!isa<Instruction>(I)) {
142 LocalValueMap[I] = Reg;
// If no register was pre-assigned, record Reg; otherwise copy Reg into the
// previously chosen register so earlier uses see the result.
// NOTE(review): lines 143-144 and 146-147 of the original are missing here.
145 if (!ValueMap.count(I))
148 TII.copyRegToReg(*MBB, MBB->end(), ValueMap[I],
149 Reg, MRI.getRegClass(Reg), MRI.getRegClass(Reg));
152 /// SelectBinaryOp - Select and emit code for a binary operator instruction,
153 /// which has an opcode which directly corresponds to the given ISD opcode.
/// Returns true on success; false tells the caller to fall back to the
/// SelectionDAG selector.
155 bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
156 MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
157 if (VT == MVT::Other || !VT.isSimple())
158 // Unhandled type. Halt "fast" selection and bail.
161 // We only handle legal types. For example, on x86-32 the instruction
162 // selector contains all of the 64-bit instructions from x86-64,
163 // under the assumption that i64 won't be used if the target doesn't
// support it.
165 if (!TLI.isTypeLegal(VT)) {
166 // MVT::i1 is special. Allow AND, OR, or XOR because they
167 // don't require additional zeroing, which makes them easy.
// NOTE(review): the condition opener on original line 168 (the i1 test)
// is missing from this listing.
169 (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
170 ISDOpcode == ISD::XOR))
171 VT = TLI.getTypeToTransformTo(VT);
176 unsigned Op0 = getRegForValue(I->getOperand(0));
178 // Unhandled operand. Halt "fast" selection and bail.
181 // Check if the second operand is a constant and handle it appropriately.
182 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
183 unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
184 ISDOpcode, Op0, CI->getZExtValue());
185 if (ResultReg != 0) {
186 // We successfully emitted code for the given LLVM Instruction.
187 UpdateValueMap(I, ResultReg);
192 // Check if the second operand is a constant float.
193 if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
// NOTE(review): the second argument line of this FastEmit_rf call
// (original line 195: the ISDOpcode/Op0/CF operands) is missing.
194 unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
196 if (ResultReg != 0) {
197 // We successfully emitted code for the given LLVM Instruction.
198 UpdateValueMap(I, ResultReg);
// Neither constant form applied; evaluate the second operand into a
// register and use the reg-reg form.
203 unsigned Op1 = getRegForValue(I->getOperand(1));
205 // Unhandled operand. Halt "fast" selection and bail.
208 // Now we have both operands in registers. Emit the instruction.
209 unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
210 ISDOpcode, Op0, Op1);
212 // Target-specific code wasn't able to find a machine opcode for
213 // the given ISD opcode and type. Halt "fast" selection and bail.
216 // We successfully emitted code for the given LLVM Instruction.
217 UpdateValueMap(I, ResultReg);
// SelectGetElementPtr - Lower a GEP by walking its indices and accumulating
// byte offsets into register N: struct fields add a constant layout offset,
// array/pointer indices add Idx * ElementSize (with the index sign-extended
// or truncated to pointer width first). Returns false to bail out.
221 bool FastISel::SelectGetElementPtr(User *I) {
222 unsigned N = getRegForValue(I->getOperand(0));
224 // Unhandled operand. Halt "fast" selection and bail.
227 const Type *Ty = I->getOperand(0)->getType();
228 MVT::SimpleValueType VT = TLI.getPointerTy().getSimpleVT();
229 for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
// NOTE(review): the loop header's second line (original 230-231, declaring
// Idx and the increment) is missing from this listing.
232 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
233 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
// Struct field: add the constant offset from the struct layout.
236 uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
237 // FIXME: This can be optimized by combining the add with a
// subsequent one.
239 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
241 // Unhandled operand. Halt "fast" selection and bail.
244 Ty = StTy->getElementType(Field);
246 Ty = cast<SequentialType>(Ty)->getElementType();
248 // If this is a constant subscript, handle it quickly.
249 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
250 if (CI->getZExtValue() == 0) continue;
252 TD.getABITypeSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
253 N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
255 // Unhandled operand. Halt "fast" selection and bail.
260 // N = N + Idx * ElementSize;
261 uint64_t ElementSize = TD.getABITypeSize(Ty);
262 unsigned IdxN = getRegForValue(Idx);
264 // Unhandled operand. Halt "fast" selection and bail.
267 // If the index is smaller or larger than intptr_t, truncate or extend
// it to match the pointer width before the multiply.
269 MVT IdxVT = MVT::getMVT(Idx->getType(), /*HandleUnknown=*/false);
270 if (IdxVT.bitsLT(VT))
271 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::SIGN_EXTEND, IdxN);
272 else if (IdxVT.bitsGT(VT))
273 IdxN = FastEmit_r(IdxVT.getSimpleVT(), VT, ISD::TRUNCATE, IdxN);
275 // Unhandled operand. Halt "fast" selection and bail.
// Scale by the element size unless it is 1 (no multiply needed).
278 if (ElementSize != 1) {
279 IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
281 // Unhandled operand. Halt "fast" selection and bail.
284 N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
286 // Unhandled operand. Halt "fast" selection and bail.
291 // We successfully emitted code for the given LLVM Instruction.
292 UpdateValueMap(I, N);
// SelectCall - Handle a call instruction. Only direct calls to a handful of
// debug-info intrinsics are selected here; everything else returns false so
// the SelectionDAG selector takes over.
296 bool FastISel::SelectCall(User *I) {
297 Function *F = cast<CallInst>(I)->getCalledFunction();
298 if (!F) return false;
300 unsigned IID = F->getIntrinsicID();
// NOTE(review): the 'switch (IID)' opener (original ~line 301-302) and the
// per-case 'return true;'/'break;' lines are missing from this listing.
303 case Intrinsic::dbg_stoppoint: {
// Emit a DBG_LABEL carrying the recorded source line/column.
304 DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
305 if (MMI && SPI->getContext() && MMI->Verify(SPI->getContext())) {
306 DebugInfoDesc *DD = MMI->getDescFor(SPI->getContext());
307 assert(DD && "Not a debug information descriptor");
308 const CompileUnitDesc *CompileUnit = cast<CompileUnitDesc>(DD);
309 unsigned SrcFile = MMI->RecordSource(CompileUnit);
310 unsigned Line = SPI->getLine();
311 unsigned Col = SPI->getColumn();
312 unsigned ID = MMI->RecordSourceLine(Line, Col, SrcFile);
313 const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
314 BuildMI(MBB, II).addImm(ID);
318 case Intrinsic::dbg_region_start: {
// Emit a DBG_LABEL marking the start of a lexical region.
319 DbgRegionStartInst *RSI = cast<DbgRegionStartInst>(I);
320 if (MMI && RSI->getContext() && MMI->Verify(RSI->getContext())) {
321 unsigned ID = MMI->RecordRegionStart(RSI->getContext());
322 const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
323 BuildMI(MBB, II).addImm(ID);
327 case Intrinsic::dbg_region_end: {
// Emit a DBG_LABEL marking the end of a lexical region.
328 DbgRegionEndInst *REI = cast<DbgRegionEndInst>(I);
329 if (MMI && REI->getContext() && MMI->Verify(REI->getContext())) {
330 unsigned ID = MMI->RecordRegionEnd(REI->getContext());
331 const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);
332 BuildMI(MBB, II).addImm(ID);
336 case Intrinsic::dbg_func_start: {
337 if (!MMI) return true;
338 DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
339 Value *SP = FSI->getSubprogram();
340 if (SP && MMI->Verify(SP)) {
341 // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is
342 // what (most?) gdb expects.
343 DebugInfoDesc *DD = MMI->getDescFor(SP);
344 assert(DD && "Not a debug information descriptor");
345 SubprogramDesc *Subprogram = cast<SubprogramDesc>(DD);
346 const CompileUnitDesc *CompileUnit = Subprogram->getFile();
347 unsigned SrcFile = MMI->RecordSource(CompileUnit);
348 // Record the source line but do not create a label. It will be emitted
349 // at asm emission time.
350 MMI->RecordSourceLine(Subprogram->getLine(), 0, SrcFile);
354 case Intrinsic::dbg_declare: {
// Emit a DECLARE tying a stack slot to its debug global variable.
355 DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
356 Value *Variable = DI->getVariable();
357 if (MMI && Variable && MMI->Verify(Variable)) {
358 // Determine the address of the declared object.
359 Value *Address = DI->getAddress();
// Look through a bitcast of the alloca, if any.
360 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
361 Address = BCI->getOperand(0);
362 AllocaInst *AI = dyn_cast<AllocaInst>(Address);
363 // Don't handle byval struct arguments, for example.
365 DenseMap<const AllocaInst*, int>::iterator SI =
366 StaticAllocaMap.find(AI);
367 assert(SI != StaticAllocaMap.end() && "Invalid dbg.declare!");
// NOTE(review): the line assigning FI from SI->second (original ~368-369)
// is missing from this listing.
370 // Determine the debug globalvariable.
371 GlobalValue *GV = cast<GlobalVariable>(Variable);
373 // Build the DECLARE instruction.
374 const TargetInstrDesc &II = TII.get(TargetInstrInfo::DECLARE);
375 BuildMI(MBB, II).addFrameIndex(FI).addGlobalAddress(GV);
// SelectCast - Select a cast instruction (sext/zext/trunc/fp conversions)
// whose semantics map directly onto a single ISD conversion opcode.
// Returns false to fall back to SelectionDAG.
383 bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
384 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
385 MVT DstVT = TLI.getValueType(I->getType());
387 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
388 DstVT == MVT::Other || !DstVT.isSimple() ||
389 !TLI.isTypeLegal(DstVT))
390 // Unhandled type. Halt "fast" selection and bail.
393 // Check if the source operand is legal. Or as a special case,
394 // it may be i1 if we're doing zero-extension because that's
395 // trivially easy and somewhat common.
396 if (!TLI.isTypeLegal(SrcVT)) {
397 if (SrcVT == MVT::i1 && Opcode == ISD::ZERO_EXTEND)
398 SrcVT = TLI.getTypeToTransformTo(SrcVT);
400 // Unhandled type. Halt "fast" selection and bail.
404 unsigned InputReg = getRegForValue(I->getOperand(0));
406 // Unhandled operand. Halt "fast" selection and bail.
// NOTE(review): the remaining FastEmit_r arguments (original lines 410-415,
// carrying DstVT, Opcode, InputReg and the failure check) are missing.
409 unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
416 UpdateValueMap(I, ResultReg);
// SelectBitCast - Select a bitcast: no-op if the type is unchanged, otherwise
// a reg-reg copy when the simple value types match, otherwise a BIT_CONVERT.
420 bool FastISel::SelectBitCast(User *I) {
421 // If the bitcast doesn't change the type, just use the operand value.
422 if (I->getType() == I->getOperand(0)->getType()) {
423 unsigned Reg = getRegForValue(I->getOperand(0));
426 UpdateValueMap(I, Reg);
430 // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
431 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
432 MVT DstVT = TLI.getValueType(I->getType());
434 if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
435 DstVT == MVT::Other || !DstVT.isSimple() ||
436 !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
437 // Unhandled type. Halt "fast" selection and bail.
440 unsigned Op0 = getRegForValue(I->getOperand(0));
442 // Unhandled operand. Halt "fast" selection and bail.
445 // First, try to perform the bitcast by inserting a reg-reg copy.
446 unsigned ResultReg = 0;
447 if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
448 TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
449 TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
450 ResultReg = createResultReg(DstClass);
452 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
453 Op0, DstClass, SrcClass);
// NOTE(review): lines 454-457 (resetting ResultReg when the copy failed)
// are missing from this listing.
458 // If the reg-reg copy failed, select a BIT_CONVERT opcode.
460 ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
461 ISD::BIT_CONVERT, Op0);
466 UpdateValueMap(I, ResultReg);
// SelectInstruction - Entry point for selecting one IR instruction; simply
// dispatches on the instruction's opcode via SelectOperator.
// NOTE(review): the return-type line (original 470, presumably 'bool') is
// missing from this listing.
471 FastISel::SelectInstruction(Instruction *I) {
472 return SelectOperator(I, I->getOpcode());
475 /// FastEmitBranch - Emit an unconditional branch to the given block,
476 /// unless it is the immediate (fall-through) successor, and update
/// the CFG (the MBB successor list).
479 FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
480 MachineFunction::iterator NextMBB =
481 next(MachineFunction::iterator(MBB));
483 if (MBB->isLayoutSuccessor(MSucc)) {
484 // The unconditional fall-through case, which needs no instructions.
// NOTE(review): the 'else' (original line 485) is missing here.
486 // The unconditional branch case.
487 TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
489 MBB->addSuccessor(MSucc);
// SelectOperator - Dispatch table mapping LLVM IR opcodes onto the
// SelectBinaryOp / SelectCast / SelectCall / etc. helpers. Returns false for
// anything unhandled so the SelectionDAG selector can take over.
// NOTE(review): the 'switch (Opcode) {' opener (original line 494) and
// several 'return true;'/'return false;' lines are missing from this listing.
493 FastISel::SelectOperator(User *I, unsigned Opcode) {
// Add/Sub/Mul split into FP vs integer ISD opcodes based on the type.
495 case Instruction::Add: {
496 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FADD : ISD::ADD;
497 return SelectBinaryOp(I, Opc);
499 case Instruction::Sub: {
500 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FSUB : ISD::SUB;
501 return SelectBinaryOp(I, Opc);
503 case Instruction::Mul: {
504 ISD::NodeType Opc = I->getType()->isFPOrFPVector() ? ISD::FMUL : ISD::MUL;
505 return SelectBinaryOp(I, Opc);
507 case Instruction::SDiv:
508 return SelectBinaryOp(I, ISD::SDIV);
509 case Instruction::UDiv:
510 return SelectBinaryOp(I, ISD::UDIV);
511 case Instruction::FDiv:
512 return SelectBinaryOp(I, ISD::FDIV);
513 case Instruction::SRem:
514 return SelectBinaryOp(I, ISD::SREM);
515 case Instruction::URem:
516 return SelectBinaryOp(I, ISD::UREM);
517 case Instruction::FRem:
518 return SelectBinaryOp(I, ISD::FREM);
519 case Instruction::Shl:
520 return SelectBinaryOp(I, ISD::SHL);
521 case Instruction::LShr:
522 return SelectBinaryOp(I, ISD::SRL);
523 case Instruction::AShr:
524 return SelectBinaryOp(I, ISD::SRA);
525 case Instruction::And:
526 return SelectBinaryOp(I, ISD::AND);
527 case Instruction::Or:
528 return SelectBinaryOp(I, ISD::OR);
529 case Instruction::Xor:
530 return SelectBinaryOp(I, ISD::XOR);
532 case Instruction::GetElementPtr:
533 return SelectGetElementPtr(I);
535 case Instruction::Br: {
536 BranchInst *BI = cast<BranchInst>(I);
538 if (BI->isUnconditional()) {
539 BasicBlock *LLVMSucc = BI->getSuccessor(0);
540 MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
541 FastEmitBranch(MSucc);
545 // Conditional branches are not handled yet.
546 // Halt "fast" selection and bail.
550 case Instruction::Unreachable:
// Nothing to emit for unreachable.
554 case Instruction::PHI:
555 // PHI nodes are already emitted.
558 case Instruction::Alloca:
559 // FunctionLowering has the static-sized case covered.
560 if (StaticAllocaMap.count(cast<AllocaInst>(I)))
563 // Dynamic-sized alloca is not handled yet.
566 case Instruction::Call:
567 return SelectCall(I);
569 case Instruction::BitCast:
570 return SelectBitCast(I);
572 case Instruction::FPToSI:
573 return SelectCast(I, ISD::FP_TO_SINT);
574 case Instruction::ZExt:
575 return SelectCast(I, ISD::ZERO_EXTEND);
576 case Instruction::SExt:
577 return SelectCast(I, ISD::SIGN_EXTEND);
578 case Instruction::Trunc:
579 return SelectCast(I, ISD::TRUNCATE);
580 case Instruction::SIToFP:
581 return SelectCast(I, ISD::SINT_TO_FP);
583 case Instruction::IntToPtr: // Deliberate fall-through.
584 case Instruction::PtrToInt: {
// Pointer/int casts become zext, trunc, or a plain register reuse
// depending on the relative bit widths.
585 MVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
586 MVT DstVT = TLI.getValueType(I->getType());
587 if (DstVT.bitsGT(SrcVT))
588 return SelectCast(I, ISD::ZERO_EXTEND);
589 if (DstVT.bitsLT(SrcVT))
590 return SelectCast(I, ISD::TRUNCATE);
591 unsigned Reg = getRegForValue(I->getOperand(0));
592 if (Reg == 0) return false;
593 UpdateValueMap(I, Reg);
598 // Unhandled instruction. Halt "fast" selection and bail.
// FastISel constructor - Caches references to the per-function machine-level
// objects (register info, frame info, constant pool) and the target's data
// layout, instruction info, and lowering info.
// NOTE(review): initializer-list entries on original lines 608-613 and 617
// (presumably MBB, the value/block/alloca maps, MF, MMI, TM) are missing
// from this listing.
603 FastISel::FastISel(MachineFunction &mf,
604 MachineModuleInfo *mmi,
605 DenseMap<const Value *, unsigned> &vm,
606 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
607 DenseMap<const AllocaInst *, int> &am)
614 MRI(MF.getRegInfo()),
615 MFI(*MF.getFrameInfo()),
616 MCP(*MF.getConstantPool()),
618 TD(*TM.getTargetData()),
619 TII(*TM.getInstrInfo()),
620 TLI(*TM.getTargetLowering()) {
// Out-of-line virtual destructor anchor; nothing to release.
623 FastISel::~FastISel() {}
// Default implementations of the FastEmit_* hooks. Each returns 0 ("can't
// handle it"); targets override these to emit real machine instructions.
// NOTE(review): the 'return 0;' bodies and closing braces (original lines
// 626-629, 632-634, etc.) are missing from this listing.
625 unsigned FastISel::FastEmit_(MVT::SimpleValueType, MVT::SimpleValueType,
630 unsigned FastISel::FastEmit_r(MVT::SimpleValueType, MVT::SimpleValueType,
631 ISD::NodeType, unsigned /*Op0*/) {
635 unsigned FastISel::FastEmit_rr(MVT::SimpleValueType, MVT::SimpleValueType,
636 ISD::NodeType, unsigned /*Op0*/,
641 unsigned FastISel::FastEmit_i(MVT::SimpleValueType, MVT::SimpleValueType,
642 ISD::NodeType, uint64_t /*Imm*/) {
646 unsigned FastISel::FastEmit_f(MVT::SimpleValueType, MVT::SimpleValueType,
647 ISD::NodeType, ConstantFP * /*FPImm*/) {
651 unsigned FastISel::FastEmit_ri(MVT::SimpleValueType, MVT::SimpleValueType,
652 ISD::NodeType, unsigned /*Op0*/,
657 unsigned FastISel::FastEmit_rf(MVT::SimpleValueType, MVT::SimpleValueType,
658 ISD::NodeType, unsigned /*Op0*/,
659 ConstantFP * /*FPImm*/) {
663 unsigned FastISel::FastEmit_rri(MVT::SimpleValueType, MVT::SimpleValueType,
665 unsigned /*Op0*/, unsigned /*Op1*/,
670 /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
671 /// to emit an instruction with an immediate operand using FastEmit_ri.
672 /// If that fails, it materializes the immediate into a register and try
673 /// FastEmit_rr instead.
674 unsigned FastISel::FastEmit_ri_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
675 unsigned Op0, uint64_t Imm,
676 MVT::SimpleValueType ImmType) {
677 // First check if immediate type is legal. If not, we can't use the ri form.
678 unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
// NOTE(review): the early return on ri success and the ImmType legality
// check (original lines 679-680) are missing from this listing.
// Materialize the immediate and fall back to the reg-reg form.
681 unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
682 if (MaterialReg == 0)
684 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
687 /// FastEmit_rf_ - This method is a wrapper of FastEmit_ri. It first tries
688 /// to emit an instruction with a floating-point immediate operand using
689 /// FastEmit_rf. If that fails, it materializes the immediate into a register
690 /// and try FastEmit_rr instead.
691 unsigned FastISel::FastEmit_rf_(MVT::SimpleValueType VT, ISD::NodeType Opcode,
692 unsigned Op0, ConstantFP *FPImm,
693 MVT::SimpleValueType ImmType) {
694 // First check if immediate type is legal. If not, we can't use the rf form.
695 unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
// NOTE(review): the early return on rf success and the ImmType legality
// check (original lines 696-698) are missing from this listing.
699 // Materialize the constant in a register.
700 unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
701 if (MaterialReg == 0) {
702 // If the target doesn't have a way to directly enter a floating-point
703 // value into a register, use an alternate approach.
704 // TODO: The current approach only supports floating-point constants
705 // that can be constructed by conversion from integer values. This should
706 // be replaced by code that creates a load from a constant-pool entry,
707 // which will require some target-specific work.
708 const APFloat &Flt = FPImm->getValueAPF();
709 MVT IntVT = TLI.getPointerTy();
// NOTE(review): the declaration of 'x' (the integer conversion buffer,
// original ~line 710-711) is missing from this listing.
712 uint32_t IntBitWidth = IntVT.getSizeInBits();
// Bail out unless the FP value converts to an integer exactly.
713 if (Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
714 APFloat::rmTowardZero) != APFloat::opOK)
716 APInt IntVal(IntBitWidth, 2, x);
718 unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
719 ISD::Constant, IntVal.getZExtValue());
// Convert the materialized integer back to floating point.
722 MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
723 ISD::SINT_TO_FP, IntegerReg);
724 if (MaterialReg == 0)
727 return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
// createResultReg - Allocate a fresh virtual register of register class RC.
730 unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
731 return MRI.createVirtualRegister(RC);
// FastEmitInst_ - Emit a machine instruction with no operands, placing its
// result in a fresh virtual register of class RC.
734 unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
735 const TargetRegisterClass* RC) {
736 unsigned ResultReg = createResultReg(RC);
737 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
739 BuildMI(MBB, II, ResultReg);
// FastEmitInst_r - Emit a one-register-operand instruction. If the opcode
// has an explicit def, the result is written straight into ResultReg;
// otherwise the instruction's first implicit def is copied into ResultReg.
// NOTE(review): the 'else' before the no-def BuildMI and the trailing return
// (original lines 751, 755-761) are missing from this listing.
743 unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
744 const TargetRegisterClass *RC,
746 unsigned ResultReg = createResultReg(RC);
747 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
749 if (II.getNumDefs() >= 1)
750 BuildMI(MBB, II, ResultReg).addReg(Op0);
752 BuildMI(MBB, II).addReg(Op0);
753 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
754 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_rr - Emit a two-register-operand instruction; same explicit-
// def vs implicit-def-copy pattern as FastEmitInst_r.
762 unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
763 const TargetRegisterClass *RC,
764 unsigned Op0, unsigned Op1) {
765 unsigned ResultReg = createResultReg(RC);
766 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
768 if (II.getNumDefs() >= 1)
769 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1);
771 BuildMI(MBB, II).addReg(Op0).addReg(Op1);
772 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
773 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_ri - Emit a register+immediate instruction; same explicit-def
// vs implicit-def-copy pattern as FastEmitInst_r.
780 unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
781 const TargetRegisterClass *RC,
782 unsigned Op0, uint64_t Imm) {
783 unsigned ResultReg = createResultReg(RC);
784 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
786 if (II.getNumDefs() >= 1)
787 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Imm);
789 BuildMI(MBB, II).addReg(Op0).addImm(Imm);
790 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
791 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_rf - Emit a register + FP-immediate instruction; same
// explicit-def vs implicit-def-copy pattern as FastEmitInst_r.
798 unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
799 const TargetRegisterClass *RC,
800 unsigned Op0, ConstantFP *FPImm) {
801 unsigned ResultReg = createResultReg(RC);
802 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
804 if (II.getNumDefs() >= 1)
805 BuildMI(MBB, II, ResultReg).addReg(Op0).addFPImm(FPImm);
807 BuildMI(MBB, II).addReg(Op0).addFPImm(FPImm);
808 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
809 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_rri - Emit a two-register + immediate instruction; same
// explicit-def vs implicit-def-copy pattern as FastEmitInst_r.
816 unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
817 const TargetRegisterClass *RC,
818 unsigned Op0, unsigned Op1, uint64_t Imm) {
819 unsigned ResultReg = createResultReg(RC);
820 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
822 if (II.getNumDefs() >= 1)
823 BuildMI(MBB, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
825 BuildMI(MBB, II).addReg(Op0).addReg(Op1).addImm(Imm);
826 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
827 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_i - Emit an immediate-only instruction; same explicit-def vs
// implicit-def-copy pattern as FastEmitInst_r.
834 unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
835 const TargetRegisterClass *RC,
837 unsigned ResultReg = createResultReg(RC);
838 const TargetInstrDesc &II = TII.get(MachineInstOpcode);
840 if (II.getNumDefs() >= 1)
841 BuildMI(MBB, II, ResultReg).addImm(Imm);
843 BuildMI(MBB, II).addImm(Imm);
844 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
845 II.ImplicitDefs[0], RC, RC);
// FastEmitInst_extractsubreg - Emit an EXTRACT_SUBREG of Op0's sub-register
// Idx. The result class is taken from Op0's register class's sub-register
// class list (Idx is 1-based, hence the -1).
852 unsigned FastISel::FastEmitInst_extractsubreg(unsigned Op0, uint32_t Idx) {
853 const TargetRegisterClass* RC = MRI.getRegClass(Op0);
854 const TargetRegisterClass* SRC = *(RC->subregclasses_begin()+Idx-1);
856 unsigned ResultReg = createResultReg(SRC);
857 const TargetInstrDesc &II = TII.get(TargetInstrInfo::EXTRACT_SUBREG);
859 if (II.getNumDefs() >= 1)
860 BuildMI(MBB, II, ResultReg).addReg(Op0).addImm(Idx);
862 BuildMI(MBB, II).addReg(Op0).addImm(Idx);
863 bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
864 II.ImplicitDefs[0], RC, RC);