//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "FunctionLoweringInfo.h"
using namespace llvm;
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Promote MVT::i1 to a legal type though, because it's common and easy.
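    // (On most targets this just widens i1 to the smallest legal integer
    // type, e.g. i8 on x86.)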
    if (VT == MVT::i1)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  unsigned Reg = LocalValueMap[V];
  if (Reg != 0)
    return Reg;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
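    // Constants needing more than 64 bits don't fit FastEmit_i's uint64_t
    // immediate; Reg stays 0 so TargetMaterializeConstant below gets a chance.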
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    // Try to emit the constant directly.
    Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
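      // For example, 4.0 converts exactly to the integer 4, which can be
      // materialized and converted back with SINT_TO_FP; values with
      // fractional parts fail the isExact check below and fall through to
      // TargetMaterializeConstant.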
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, 2, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode())) return 0;
    Reg = LocalValueMap[Op];
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(MBB, DL, TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0)
    LocalValueMap[V] = Reg;
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  if (ValueMap.count(V))
    return ValueMap[V];
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return Reg;
  }

  unsigned &AssignedReg = ValueMap[I];
  if (AssignedReg == 0)
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    const TargetRegisterClass *RegClass = MRI.getRegClass(Reg);
    TII.copyRegToReg(*MBB, MBB->end(), AssignedReg,
                     Reg, RegClass, RegClass);
  }
  return AssignedReg;
}

unsigned FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return 0;

  // If the index is smaller or larger than intptr_t, truncate or extend it.
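  // (GEP indices are signed, so a narrower index is sign-extended; e.g. an
  // i32 index on a 64-bit target becomes a 64-bit sign extension.)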
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  else if (IdxVT.bitsGT(PtrVT))
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  return IdxN;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
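    // (For these bitwise operations, the low bit of the result is correct
    // no matter what the promoted high bits of the operands contain.)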
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_ri(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CI->getZExtValue());
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  const Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offs = TD.getStructLayout(StTy)->getElementOffset(Field);
        // FIXME: This can be optimized by combining the add with a
        // subsequent one.
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
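      // (The whole offset folds to an immediate; e.g. index 3 into an array
      // of i32 adds a constant 12 to the pointer.)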
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->getZExtValue() == 0) continue;
        uint64_t Offs =
          TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        N = FastEmit_ri_(VT, ISD::ADD, N, Offs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        continue;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = TD.getTypeAllocSize(Ty);
      unsigned IdxN = getRegForGEPIndex(Idx);
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

bool FastISel::SelectCall(const User *I) {
  const Function *F = cast<CallInst>(I)->getCalledFunction();
  if (!F) return false;

  // Handle selected intrinsic function calls.
  unsigned IID = F->getIntrinsicID();
  switch (IID) {
  default: break;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(I);
    if (!DIDescriptor::ValidDebugInfo(DI->getVariable(), CodeGenOpt::None) ||
        !MF.getMMI().hasDebugInfo())
      return true;

    const Value *Address = DI->getAddress();
    if (!Address)
      return true;
    if (isa<UndefValue>(Address))
      return true;
    const AllocaInst *AI = dyn_cast<AllocaInst>(Address);
    // Don't handle byval struct arguments or VLAs, for example.
    if (!AI) break;
    DenseMap<const AllocaInst*, int>::iterator SI =
      StaticAllocaMap.find(AI);
    if (SI == StaticAllocaMap.end()) break; // VLAs.
    int FI = SI->second;
    if (!DI->getDebugLoc().isUnknown())
      MF.getMMI().setVariableDbgInfo(DI->getVariable(), FI, DI->getDebugLoc());

    // Building the map above is target independent. Generating DBG_VALUE
    // inline is target dependent; do this now.
    (void)TargetSelectInstruction(cast<Instruction>(I));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(I);
    const TargetInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      BuildMI(MBB, DL, II).addImm(CI->getZExtValue()).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(MBB, DL, II).addFPImm(CF).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      BuildMI(MBB, DL, II).addReg(Reg, RegState::Debug).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      // Insert an undef so we can see what we dropped.
      BuildMI(MBB, DL, II).addReg(0U).addImm(DI->getOffset()).
        addMetadata(DI->getVariable());
    }
    return true;
  }
  case Intrinsic::eh_exception: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
    default: break;
    case TargetLowering::Expand: {
      assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
      unsigned Reg = TLI.getExceptionAddressRegister();
      const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                           Reg, RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      InsertedCopy = InsertedCopy;
      UpdateValueMap(I, ResultReg);
      return true;
    }
    }
    break;
  }
  case Intrinsic::eh_selector: {
    EVT VT = TLI.getValueType(I->getType());
    switch (TLI.getOperationAction(ISD::EHSELECTION, VT)) {
    default: break;
    case TargetLowering::Expand: {
      if (MBB->isLandingPad())
        AddCatchInfo(*cast<CallInst>(I), &MF.getMMI(), MBB);
      else {
#ifndef NDEBUG
        CatchInfoLost.insert(cast<CallInst>(I));
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) MBB->addLiveIn(Reg);
      }

      unsigned Reg = TLI.getExceptionSelectorRegister();
      EVT SrcVT = TLI.getPointerTy();
      const TargetRegisterClass *RC = TLI.getRegClassFor(SrcVT);
      unsigned ResultReg = createResultReg(RC);
      bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg, Reg,
                                           RC, RC);
      assert(InsertedCopy && "Can't copy address registers!");
      InsertedCopy = InsertedCopy;

      // Cast the register to the type of the selector.
      if (SrcVT.bitsGT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32, ISD::TRUNCATE,
                               ResultReg);
      else if (SrcVT.bitsLT(MVT::i32))
        ResultReg = FastEmit_r(SrcVT.getSimpleVT(), MVT::i32,
                               ISD::SIGN_EXTEND, ResultReg);
      if (ResultReg == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      UpdateValueMap(I, ResultReg);

      return true;
    }
    }
    break;
  }
  }

  // An arbitrary call. Bail.
  return false;
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal. Or as a special case,
  // it may be i1 if we're doing a truncate because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(DstVT))
    if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  // Check if the source operand is legal. Or as a special case,
  // it may be i1 if we're doing zero-extension because that's
  // easy and somewhat common.
  if (!TLI.isTypeLegal(SrcVT))
    if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
      // Unhandled type. Halt "fast" selection and bail.
      return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the operand is i1, arrange for the high bits in the register to be zero.
  if (SrcVT == MVT::i1) {
    SrcVT = TLI.getTypeToTransformTo(I->getContext(), SrcVT);
    InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
    if (!InputReg)
      return false;
  }
  // If the result is i1, truncate to the target's type for i1 first.
  if (DstVT == MVT::i1)
    DstVT = TLI.getTypeToTransformTo(I->getContext(), DstVT);

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple() ||
      !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
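  // (This covers casts where source and destination share a machine type,
  // e.g. a bitcast between two pointer types.)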
  unsigned ResultReg = 0;
  if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
    TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    ResultReg = createResultReg(DstClass);

    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         Op0, DstClass, SrcClass);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                           ISD::BIT_CONVERT, Op0);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  DL = I->getDebugLoc();

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    DL = DebugLoc();
    return true;
  }

  // Next, try calling the target to attempt to handle the instruction.
  if (TargetSelectInstruction(I)) {
    DL = DebugLoc();
    return true;
  }

  DL = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc) {
  if (MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*MBB, MSucc, NULL, SmallVector<MachineOperand, 0>());
  }
  MBB->addSuccessor(MSucc);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
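  // (IEEE formats keep the sign in the top bit, so e.g. for f64 the xor
  // mask is 1 << 63; flipping that bit negates the value.)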
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BIT_CONVERT, OpReg);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR, IntReg,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BIT_CONVERT, IntResultReg);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = MBBMap[LLVMSucc];
      FastEmitBranch(MSucc);
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    // Nothing to emit.
    return true;

  case Instruction::PHI:
    // PHI nodes are already emitted.
    return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(MachineFunction &mf,
                   DenseMap<const Value *, unsigned> &vm,
                   DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
                   DenseMap<const AllocaInst *, int> &am
#ifndef NDEBUG
                   , SmallSet<const Instruction *, 8> &cil
#endif
                   )
  : MBB(0),
    ValueMap(vm),
    MBBMap(bm),
    StaticAllocaMap(am),
#ifndef NDEBUG
    CatchInfoLost(cil),
#endif
    MF(mf),
    MRI(MF.getRegInfo()),
    MFI(*MF.getFrameInfo()),
    MCP(*MF.getConstantPool()),
    TM(MF.getTarget()),
    TD(*TM.getTargetData()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()) {
}

FastISel::~FastISel() {}
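
// The default FastEmit implementations below simply fail by returning 0.
// Targets override the forms they can handle, typically via
// tablegen-generated FastISel code.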
unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned, unsigned /*Op0*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, unsigned /*Op1*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
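/// For example, an ADD whose constant doesn't fit the target's immediate
/// field fails the ri form and falls back to a register-register ADD.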
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, uint64_t Imm,
                                MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0)
    return 0;
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(MVT VT, unsigned Opcode,
                                unsigned Op0, const ConstantFP *FPImm,
                                MVT ImmType) {
  // First check if immediate type is legal. If not, we can't use the rf form.
  unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
  if (ResultReg != 0)
    return ResultReg;

  // Materialize the constant in a register.
  unsigned MaterialReg = FastEmit_f(ImmType, ImmType, ISD::ConstantFP, FPImm);
  if (MaterialReg == 0) {
    // If the target doesn't have a way to directly enter a floating-point
    // value into a register, use an alternate approach.
    // TODO: The current approach only supports floating-point constants
    // that can be constructed by conversion from integer values. This should
    // be replaced by code that creates a load from a constant-pool entry,
    // which will require some target-specific work.
    const APFloat &Flt = FPImm->getValueAPF();
    EVT IntVT = TLI.getPointerTy();

    uint64_t x[2];
    uint32_t IntBitWidth = IntVT.getSizeInBits();
    bool isExact;
    (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                APFloat::rmTowardZero, &isExact);
    if (!isExact)
      return 0;
    APInt IntVal(IntBitWidth, 2, x);

    unsigned IntegerReg = FastEmit_i(IntVT.getSimpleVT(), IntVT.getSimpleVT(),
                                     ISD::Constant, IntVal.getZExtValue());
    if (IntegerReg == 0)
      return 0;
    MaterialReg = FastEmit_r(IntVT.getSimpleVT(), VT,
                             ISD::SINT_TO_FP, IntegerReg);
    if (MaterialReg == 0)
      return 0;
  }
  return FastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(MBB, DL, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

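  // If the instruction's result is tied to an explicit def, emit directly
  // into ResultReg. Otherwise the result appears as an implicit def (e.g. a
  // fixed physical register), so emit the instruction and then copy the
  // implicit def into ResultReg. The same pattern is used by the other
  // FastEmitInst_* helpers below.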
  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0);
  else {
    BuildMI(MBB, DL, II).addReg(Op0);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, unsigned Op1) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, const ConstantFP *FPImm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addFPImm(FPImm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addFPImm(FPImm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, unsigned Op1, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addReg(Op1).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addReg(Op1).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const TargetInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addImm(Imm);
  else {
    BuildMI(MBB, DL, II).addImm(Imm);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, uint32_t Idx) {
  const TargetRegisterClass* RC = MRI.getRegClass(Op0);

  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  const TargetInstrDesc &II = TII.get(TargetOpcode::EXTRACT_SUBREG);

  if (II.getNumDefs() >= 1)
    BuildMI(MBB, DL, II, ResultReg).addReg(Op0).addImm(Idx);
  else {
    BuildMI(MBB, DL, II).addReg(Op0).addImm(Idx);
    bool InsertedCopy = TII.copyRegToReg(*MBB, MBB->end(), ResultReg,
                                         II.ImplicitDefs[0], RC, RC);
    if (!InsertedCopy)
      ResultReg = 0;
  }
  return ResultReg;
}

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) {
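  // An AND with the immediate 1 clears every bit above bit zero, which is
  // exactly a zero-extension from i1 in the wider register.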
  return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
}