//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "function-lowering-info"
#include "FunctionLoweringInfo.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
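
// Worked example (added for illustration; the type and indices are
// hypothetical, not taken from any particular caller): for the aggregate
// type { i32, { float, float }, i8 } and the index sequence {1, 1}, as in
// "extractvalue %agg, 1, 1", the walk counts one leaf for element 0 (the
// i32), descends into element 1, counts one more leaf (the first float),
// and returns linear index 2 for the second float.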

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
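
// Worked example (illustrative; assumes a layout where i32 and float are
// both 4 bytes with 4-byte alignment): for the type { i32, [2 x float] },
// this produces ValueVTs = { i32, f32, f32 } and, when Offsets is
// non-null, Offsets = { 0, 4, 8 }.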

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (Value::const_use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool EnableFastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  // Don't force virtual registers for byval arguments though, because
  // fast-isel can't handle those in all cases.
  if (EnableFastISel && !A->hasByValAttr())
    return A->use_empty();

  const BasicBlock *Entry = A->getParent()->begin();
  for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
       UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false; // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(const TargetLowering &tli)
  : TLI(tli) {
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               bool EnableFastISel) {
  Fn = &fn;
  MF = &mf;
  RegInfo = &MF->getRegInfo();

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::const_iterator BB = Fn->begin(), EB = Fn->end();
  for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                   AI->getAlignment());

        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align, false);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB->hasAddressTaken())
      MBB->setHasAddressTaken();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (BasicBlock::const_iterator I = BB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (PN->use_empty()) continue;

      DebugLoc DL = PN->getDebugLoc();
      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  // Mark landing pad blocks.
  for (BB = Fn->begin(); BB != EB; ++BB)
    if (const InvokeInst *Invoke = dyn_cast<InvokeInst>(BB->getTerminator()))
      MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad();
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  assert(CatchInfoFound.size() == CatchInfoLost.size() &&
         "Not all catch info was assigned to a landing pad!");

  MBBMap.clear();
  ValueMap.clear();
  StaticAllocaMap.clear();
#ifndef NDEBUG
  CatchInfoLost.clear();
  CatchInfoFound.clear();
#endif
  LiveOutRegInfo.clear();
}

unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, V->getType(), ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);

    unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = MakeReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}
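
// Illustrative note (hypothetical target, added for exposition): on a
// 32-bit target where an i64 is expanded into two i32 registers, a value
// of type { i64, float } yields ValueVTs = { i64, f32 }; this function
// then creates two i32 vregs for the i64 and one f32 vreg for the float,
// all consecutively numbered, and returns the first vreg number.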

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == ".llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// AddCatchInfo - Extract the personality and type infos from an eh.selector
/// call, and add them to the specified machine basic block.
void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
                        MachineBasicBlock *MBB) {
  // Inform the MachineModuleInfo of the personality for this landing pad.
  const ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
  assert(CE->getOpcode() == Instruction::BitCast &&
         isa<Function>(CE->getOperand(0)) &&
         "Personality should be a function");
  MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));

  // Gather all the type infos for this landing pad and pass them along to
  // MachineModuleInfo.
  std::vector<const GlobalVariable *> TyInfo;
  unsigned N = I.getNumOperands();

  for (unsigned i = N - 1; i > 2; --i) {
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
      unsigned FilterLength = CI->getZExtValue();
      unsigned FirstCatch = i + FilterLength + !FilterLength;
      assert(FirstCatch <= N && "Invalid filter length");

      if (FirstCatch < N) {
        TyInfo.reserve(N - FirstCatch);
        for (unsigned j = FirstCatch; j < N; ++j)
          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
        MMI->addCatchTypeInfo(MBB, TyInfo);
        TyInfo.clear();
      }

      if (!FilterLength) {
        // Cleanup.
        MMI->addCleanup(MBB);
      } else {
        // Filter.
        TyInfo.reserve(FilterLength - 1);
        for (unsigned j = i + 1; j < FirstCatch; ++j)
          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
        MMI->addFilterTypeInfo(MBB, TyInfo);
        TyInfo.clear();
      }

      N = i;
    }
  }

  if (N > 3) {
    TyInfo.reserve(N - 3);
    for (unsigned j = 3; j < N; ++j)
      TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
    MMI->addCatchTypeInfo(MBB, TyInfo);
  }
}
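
// Operand layout (for reference; the selector call below is a hypothetical
// example): in this codebase a CallInst's operand 0 is the callee, so for
//   %sel = call i32 @llvm.eh.selector(i8* %eh_ptr,
//            i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*),
//            i8* @typeinfo1, i8* @typeinfo2)
// operand 2 is the personality and operands 3..N-1 encode catch typeinfos,
// filter lengths (ConstantInts, with 0 meaning cleanup), and filter
// typeinfos, which AddCatchInfo scans from the end.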

void llvm::CopyCatchInfo(const BasicBlock *SrcBB, const BasicBlock *DestBB,
                         MachineModuleInfo *MMI, FunctionLoweringInfo &FLI) {
  for (BasicBlock::const_iterator I = SrcBB->begin(), E = --SrcBB->end();
       I != E; ++I)
    if (const EHSelectorInst *EHSel = dyn_cast<EHSelectorInst>(I)) {
      // Apply the catch info to DestBB.
      AddCatchInfo(*EHSel, MMI, FLI.MBBMap[DestBB]);
#ifndef NDEBUG
      if (!FLI.MBBMap[SrcBB]->isLandingPad())
        FLI.CatchInfoFound.insert(EHSel);
#endif
    }
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO; break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO; break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
  default:
    llvm_unreachable("Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
    break;
  }
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}
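
// For example, FCMP_OLT normally maps to the ordered SETOLT; under
// FiniteOnlyFPMath(), NaNs are assumed absent, so the ordered-vs-unordered
// distinction is dropped and the plain SETLT is returned instead.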

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
  const Function *F = ExitBB->getParent();

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !I->isSafeToSpeculativelyExecute())
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !BBI->isSafeToSpeculativelyExecute())
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
       U = dyn_cast<Instruction>(U->getOperand(0))) {
    if (!U)
      return false;
    if (!U->hasOneUse())
      return false;
    if (U == I)
      break;
    // Check for a truly no-op truncate.
    if (isa<TruncInst>(U) &&
        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
      continue;
    // Check for a truly no-op bitcast.
    if (isa<BitCastInst>(U) &&
        (U->getOperand(0)->getType() == U->getType() ||
         (U->getOperand(0)->getType()->isPointerTy() &&
          U->getType()->isPointerTy())))
      continue;
    // Otherwise it's not a true no-op.
    return false;
  }

  return true;
}
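
// For example (illustrative IR, not taken from this file): in
//   %r = tail call i32 @callee()
//   ret i32 %r
// the call is in tail call position, whereas interposing another chained
// instruction or modifying the value (say "%s = add i32 %r, 1" before the
// ret) disqualifies it.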