//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
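// (For example, an IR-level "add i32 %x, 42" can typically be selected
// directly to a single add-with-immediate machine instruction.)
//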
20 // "Fast" instruction selection is able to fail gracefully and transfer
21 // control to the SelectionDAG selector for operations that it doesn't
22 // support. In many cases, this allows us to avoid duplicating a lot of
23 // the complicated lowering logic that SelectionDAG currently has.
25 // The intended use for "fast" instruction selection is "-O0" mode
26 // compilation, where the quality of the generated code is irrelevant when
27 // weighed against the speed at which the code can be generated. Also,
28 // at -O0, the LLVM optimizers are not running, and this makes the
29 // compile time of codegen a much higher portion of the overall compile
30 // time. Despite its limitations, "fast" instruction selection is able to
31 // handle enough code on its own to provide noticeable overall speedups
34 // Basic operations are supported in a target-independent way, by reading
35 // the same instruction descriptions that the SelectionDAG selector reads,
36 // and identifying simple arithmetic operations that can be directly selected
37 // from simple operators. More complicated operations currently require
38 // target-specific code.
40 //===----------------------------------------------------------------------===//
#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
          "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
          "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// \brief Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                           unsigned AttrIdx) {
  isSExt     = CS->paramHasAttr(AttrIdx, Attribute::SExt);
  isZExt     = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
  isInReg    = CS->paramHasAttr(AttrIdx, Attribute::InReg);
  isSRet     = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
  isNest     = CS->paramHasAttr(AttrIdx, Attribute::Nest);
  isByVal    = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
  isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
  isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
  Alignment  = CS->getParamAlignment(AttrIdx);
}

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

bool FastISel::LowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!FastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
         E = FuncInfo.Fn->arg_end(); I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[I] = VI->second;
  }
  return true;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}

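// A value has a "trivial kill" when its only use is in the same block as its
// definition, so that use can safely be marked as killing the register. For
// example, given:
//   %c = add i32 %a, %b
//   %d = xor i32 %c, -1
// if %d is %c's sole user and both are in the same block, the use of %c in %d
// is a trivial kill; a value used in another block, or more than once, is not.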
bool FastISel::hasTrivialKill(const Value *V) const {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V)) {
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  } else if (isa<ConstantPointerNull>(V)) {
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue()) {
      Reg = TargetMaterializeFloatZero(CF);
    } else {
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);
    }

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }

  // If target-independent code couldn't handle the value, give target-specific
  // code a try.
  if (!Reg && isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}

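// getRegForGEPIndex also normalizes a GEP index to pointer width: on a 64-bit
// target an "i32 %idx" index is sign-extended to 64 bits before being used in
// address arithmetic, and an index wider than the pointer is truncated.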
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert (I && E && std::distance(I, E) > 0 && "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

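// In addition to the generic "rr" and "ri" forms, SelectBinaryOp performs two
// small strength reductions on constant operands: "sdiv exact i32 %x, 8"
// becomes "sra i32 %x, 3", and "urem i32 %x, 8" becomes "and i32 %x, 7".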
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

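// GEP lowering accumulates constant offsets so that, for example,
//   %p = getelementptr { i32, i32 }* %s, i32 0, i32 1
// becomes a single add of the element offset (4 bytes here) rather than one
// add per index.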
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        // N = N + Offset
        TotalOffs +=
          DL.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (N == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

/// \brief Add a stackmap or patchpoint intrinsic call's live variable operands
/// to a stackmap or patchpoint machine instruction.
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (Reg == 0)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }

  return true;
}

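// A typical stackmap call looks like:
//   call void @llvm.experimental.stackmap(i64 42, i32 0, i32 %x, float %y)
// where 42 is the ID, 0 is the number of shadow bytes, and %x/%y are the live
// values to be recorded in the stack map.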
bool FastISel::SelectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
    cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
      ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
      /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
    .addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.addOperand(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
    .addImm(0).addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo()->setHasStackMap();

  return true;
}

/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
       ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, AttrI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return LowerCallTo(CLI);
}

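// For example, a patchpoint with a null target and no call arguments:
//   call void (i64, i32, i8*, i32, ...)*
//     @llvm.experimental.patchpoint.void(i64 3, i32 16, i8* null, i32 0)
// reserves 16 bytes of encoding space for later patching and records any
// trailing operands as live values.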
bool FastISel::SelectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos);

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
    cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
    cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Assume that the callee is a constant address or null pointer.
  // FIXME: handle function symbols in the future.
  uint64_t CalleeAddr;
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee))
    CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
  else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr)
      CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (isa<ConstantPointerNull>(Callee))
    CalleeAddr = 0;
  else
    llvm_unreachable("Unsupported callee address.");

  Ops.push_back(MachineOperand::CreateImm(CalleeAddr));

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(TRI.getCallPreservedMask(CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
      ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
      /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImp=*/true));

  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));
  for (auto &MO : Ops)
    MIB.addOperand(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo()->setHasPatchPoint();

  if (CLI.NumResultRegs)
    UpdateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

/// Returns an AttributeSet representing the attributes applied to the return
/// value of the given call.
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
                           Attrs);
}

bool FastISel::LowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FTy->getReturnType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI + 1);
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs);

  return LowerCallTo(CLI);
}

bool FastISel::LowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);

  bool CanLowerReturn = TLI.CanLowerReturn(CLI.CallConv, *FuncInfo.MF,
                                           CLI.IsVarArg, Outs,
                                           CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.isByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
      FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.isZExt)
      Flags.setZExt();
    if (Arg.isSExt)
      Flags.setSExt();
    if (Arg.isInReg)
      Flags.setInReg();
    if (Arg.isSRet)
      Flags.setSRet();
    if (Arg.isByVal)
      Flags.setByVal();
    if (Arg.isInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.isByVal || Arg.isInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.isNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!FastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    UpdateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

bool FastISel::LowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FuncTy->getReturnType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within FastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM, TLI))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
    .setTailCall(IsTailCall);

  return LowerCallTo(CLI);
}

bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  ComputeUsesVAFloatArgument(*Call, &MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return SelectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized,
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  flushLocalValueMap();

  return LowerCall(Call);
}

bool FastISel::SelectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default: break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    DIVariable DIVar(DI->getVariable());
    assert((!DIVar || DIVar.isVariable()) &&
           "Variable in DbgDeclareInst should be either null or a DIVariable.");
    if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    unsigned Offset = 0;
    Optional<MachineOperand> Op;
    if (const Argument *Arg = dyn_cast<Argument>(Address))
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
    if (Offset)
      Op = MachineOperand::CreateFI(Offset);
    if (!Op)
      if (unsigned Reg = lookUpRegForValue(Address))
        Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      if (Op->isReg()) {
        Op->setIsDebug(true);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
                DI->getVariable());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
          .addOperand(*Op)
          .addImm(0)
          .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect,
              Reg, DI->getOffset(), DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (ResultReg == 0)
      return false;
    UpdateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return SelectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return SelectPatchpoint(II);
  }

  return FastLowerIntrinsicCall(II);
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

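// A bitcast that doesn't change the machine value type (e.g. the pointer
// bitcast "bitcast i8* %p to i32*") is lowered to a plain register-to-register
// COPY when both sides share a register class; otherwise an ISD::BITCAST is
// emitted via FastEmit_r.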
bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DbgLoc = I->getDebugLoc();

  MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;

  if (const CallInst *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        !TM.Options.getTrapFunctionName().empty())
      return false;
  }

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    ++NumFastIselSuccessIndependent;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code. However, ignore call instructions since we've flushed
  // the local value map and recomputed the insert point.
  if (!isa<CallInst>(I)) {
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
  }

  // Next, try calling the target to attempt to handle the instruction.
  SavedInsertPt = FuncInfo.InsertPt;
  if (TargetSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Check for dead code and remove as necessary.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only instruction
    // in the block then emit it, otherwise we have the unconditional
    // fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  uint32_t BranchWeight = 0;
  if (FuncInfo.BPI)
    BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
                                               MSucc->getBasicBlock());
  FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
}

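// When the target provides no ISD::FNEG, SelectFNeg falls back to integer bit
// twiddling: the value is bitcast to an integer, the sign bit is flipped with
// an xor (e.g. xor with 1 << 63 for a double), and the result is bitcast back
// to floating-point.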
/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return FastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo,
                   const TargetLibraryInfo *libInfo)
  : FuncInfo(funcInfo),
    MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()),
    MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()),
    DL(*TM.getDataLayout()),
    TII(*TM.getInstrInfo()),
    TLI(*TM.getTargetLowering()),
    TRI(*TM.getRegisterInfo()),
    LibInfo(libInfo) {
}

FastISel::~FastISel() {}

bool FastISel::FastLowerArguments() {
  return false;
}

bool FastISel::FastLowerCall(CallLoweringInfo &/*CLI*/) {
  return false;
}

bool FastISel::FastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::FastEmit_(MVT, MVT, unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT, unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT, unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT, unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT, unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT, unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

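// For example, FastEmit_ri_(MVT::i32, ISD::MUL, Op0, Op0IsKill, /*Imm=*/8,
// MVT::i32) is rewritten below into a left shift by 3 before any target hook
// is consulted.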
/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    assert (MaterialReg != 0 && "Unable to materialize imm.");
    if (MaterialReg == 0) return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II,
                                            unsigned Op, unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}


unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  RC = TII.getRegClass(II, II.getNumDefs(), &TRI, *FuncInfo.MF);
  MRI.constrainRegClass(Op0, RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill,
                                     uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DbgLoc, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
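
// For instance (illustrative), extracting the low 32 bits of a 64-bit vreg
// on x86-64 uses the sub_32bit subregister index and lowers to a subregister
// COPY:
//   %vreg1<def> = COPY %vreg0:sub_32bit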

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op0
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
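
// IR-level view of the helper above (a sketch): zero-extending an i1 is just
//   %r = and i32 %b, 1
// so it only requires the target to select an AND-with-immediate.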

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead PHIs.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}
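
// Example of the situation handled above (illustrative IR): given
//   this:
//     br label %succ
//   succ:
//     %p = phi i32 [ 7, %this ], [ %x, %other ]
// the constant 7 must be materialized into a vreg while selecting %this, and
// the (machine PHI, vreg) pair is queued in PHINodesToUpdate because the
// machine PHI cannot be patched until all blocks have been selected.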

bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (LoadReg == 0)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
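
// Illustrative fold (target-dependent): for
//   %v = load i32* %p
//   %s = add i32 %v, %y
// where %v has exactly the one use shown, a target such as x86 may merge the
// load into the user's memory operand (e.g. ADD32rm) instead of emitting a
// separate load followed by a register-register add.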

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
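
// The pattern accepted above looks like (sketch):
//   %a = add i64 %x, 16          ; constant second operand, same block
//   %g = getelementptr i8* %p, i64 %a
// so the add's constant can be folded into the GEP's address computation.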

MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  unsigned Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata("nontemporal") != nullptr;
  bool IsInvariant = I->getMetadata("invariant.load") != nullptr;
  const MDNode *TBAAInfo = I->getMetadata(LLVMContext::MD_tbaa);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = TM.getDataLayout()->getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, TBAAInfo, Ranges);
}
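
// Typical use when selecting a load or store (a sketch): attach the memory
// operand to the freshly built machine instruction so later passes know the
// access's size, alignment, and volatility:
//
//   if (MachineMemOperand *MMO = createMachineMemOperandFor(I))
//     MIB.addMemOperand(MMO);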