//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

14 #include "llvm/CodeGen/Analysis.h"
15 #include "llvm/DerivedTypes.h"
16 #include "llvm/Function.h"
17 #include "llvm/Instructions.h"
18 #include "llvm/IntrinsicInst.h"
19 #include "llvm/LLVMContext.h"
20 #include "llvm/Module.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/Target/TargetData.h"
23 #include "llvm/Target/TargetLowering.h"
24 #include "llvm/Target/TargetOptions.h"
25 #include "llvm/Support/ErrorHandling.h"
26 #include "llvm/Support/MathExtras.h"
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
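///
/// For example (an illustrative sketch, assuming the flattening order
/// described above): in the aggregate type {i32, {float, double}} the
/// leaves linearize as i32 -> 0, float -> 1, double -> 2, so the index
/// sequence {1, 1} (outer field 1, then inner field 1) yields 2:
///
///   // Hypothetical caller; Indices points at the array {1, 1}.
///   unsigned Linear = ComputeLinearIndex(TLI, STy, Indices, Indices + 2, 0);
///   // Linear == 2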
unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
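///
/// For example (an illustrative sketch, assuming a target where i32 and
/// float are both 4 bytes and the struct has no padding): for the type
/// {i32, [2 x float]}, ValueVTs receives {i32, f32, f32} and Offsets,
/// if non-null, receives {0, 4, 8}:
///
///   SmallVector<EVT, 4> VTs;
///   SmallVector<uint64_t, 4> Offs;
///   ComputeValueVTs(TLI, STy, VTs, &Offs);  // STy = {i32, [2 x float]}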
void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
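///
/// For example (illustrative): a C++ typeinfo operand typically reaches
/// here as a constant cast such as bitcast (i8** @_ZTIi to i8*); stripping
/// the cast recovers @_ZTIi. A catch-all is encoded as a null pointer
/// (possibly via the .llvm.eh.catch.all.value global) and yields null.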
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == ".llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
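///
/// For example (illustrative): an operand written with the indirect memory
/// constraint "=*m", as in
///   call void asm "movl %eax, $0", "=*m"(i32* %p)
/// parses to a ConstraintInfo whose Codes contain "m", which
/// getConstraintType classifies as TargetLowering::C_Memory, so this
/// function returns true.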
bool
llvm::hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
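///
/// For example (illustrative): FCMP_OLT normally maps to the ordered
/// ISD::SETOLT, but when FiniteOnlyFPMath() is true NaNs cannot occur,
/// so the plain ISD::SETLT is returned instead:
///
///   ISD::CondCode CC = getFCmpCondCode(FCmpInst::FCMP_OLT);
///   // CC == ISD::SETOLT, or ISD::SETLT under finite-only FP math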
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;  break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO; break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
  default:
    llvm_unreachable("Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
    break;
  }
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
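///
/// For example (illustrative): the signed comparison ICMP_SLT maps to
/// ISD::SETLT, while its unsigned counterpart ICMP_ULT maps to
/// ISD::SETULT; unlike floating point there is no ordered/unordered axis.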
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
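///
/// For example (illustrative): in the block below the call is in tail
/// call position, because nothing executes between it and the return and
/// the return value is the unmodified call result:
///
///   %r = call i32 @callee(i32 %x)
///   ret i32 %r
///
/// Inserting, say, a store between the call and the ret would disqualify it.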
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
  const Function *F = ExitBB->getParent();

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !I->isSafeToSpeculativelyExecute())
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !BBI->isSafeToSpeculativelyExecute())
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
       U = dyn_cast<Instruction>(U->getOperand(0))) {
    if (!U)
      return false;
    if (!U->hasOneUse())
      return false;
    if (U == I)
      break;
    // Check for a truly no-op truncate.
    if (isa<TruncInst>(U) &&
        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
      continue;
    // Check for a truly no-op bitcast.
    if (isa<BitCastInst>(U) &&
        (U->getOperand(0)->getType() == U->getType() ||
         (U->getOperand(0)->getType()->isPointerTy() &&
          U->getType()->isPointerTy())))
      continue;
    // Otherwise it's not a true no-op.
    return false;
  }

  return true;
}