//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
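///
/// As an illustrative example (not part of the original comments): in the
/// type { i32, { float, double } }, the indices {1, 1} identify the double,
/// whose linearized index is 2 (the i32 occupies index 0 and the float
/// index 1).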
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, nullptr, nullptr, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
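///
/// For example (an illustrative sketch, assuming i32 and float are legal for
/// the target and a typical 32-bit data layout): the type
/// { i32, [2 x float] } produces ValueVTs = { i32, f32, f32 } and
/// Offsets = { 0, 4, 8 }.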
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }
    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }
  return false;
}

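// For example, this function would return true for a call site like the
// following, whose operand uses an indirect memory constraint (illustrative
// IR, not taken from a real test):
//
//   call void asm "movl $$42, $0", "=*m"(i32* %ptr)
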
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

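// For example, when the operands are known not to be NaN (say, under
// fast-math), an ordered SETOLT and an unordered SETULT behave identically,
// so both can be relaxed to the plain SETLT returned above.
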
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
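///
/// An illustrative example (not from the original comments): tracing %v in
///
///   %agg = insertvalue { i8*, i32 } undef, i8* %p, 0
///   %v = extractvalue { i8*, i32 } %agg, 0
///
/// looks back through the extractvalue/insertvalue pair and returns %p.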
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          TLI.getPointerTy().getSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (isa<CallInst>(I)) {
      // Look through call (skipping callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (isa<InvokeInst>(I)) {
      // Look through invoke (skipping BB, BB, Callee)
      for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
           i != e; ++i) {
        unsigned attrInd = i - I->op_begin() + 1;
        if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
            isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
          NoopInput = *i;
          break;
        }
      }
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
                     ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      std::copy(ExtractLoc.rbegin(), ExtractLoc.rend(),
                std::back_inserter(ValLoc));
      NoopInput = Op;
    }

    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

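// An illustrative case (assuming the target's allowTruncateForTailCall
// accepts the i32 -> i8 truncate):
//
//   %x = tail call i32 @f()
//   %t = trunc i32 %x to i8
//   ret i8 %t
//
// Here the "ret" needs only 8 of the 32 bits the call provides, so the slot
// merely discards data and the function above returns true.
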
/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;
    SubTypes.push_back(CT);
    Path.push_back(0);
    DeeperType = CT->getTypeAtIndex(0U);
  }
  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }
  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());
  return true;
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
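///
/// For instance (an illustrative fragment), the call below is in tail call
/// position because nothing except the return is scheduled after it:
///
///   %ret = tail call i32 @callee(i32 %arg)
///   ret i32 %ret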
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  return returnTypeIsEligibleForTailCall(ExitBB->getParent(), I, Ret,
                                         *TM.getTargetLowering());
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  AttrBuilder CallerAttrs(F->getAttributes(),
                          AttributeSet::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeSet::ReturnIndex);

  // Noalias is completely benign as far as calling convention goes, it
  // shouldn't affect whether the call is a tail call.
  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);

  bool AllowDifferingSizes = true;
  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    AllowDifferingSizes = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  if (CallerAttrs != CalleeAttrs)
    return false;
  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath, TmpCallPath;
    std::copy(RetPath.rbegin(), RetPath.rend(),
              std::back_inserter(TmpRetPath));
    std::copy(CallPath.rbegin(), CallPath.rend(),
              std::back_inserter(TmpCallPath));

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

bool llvm::canBeOmittedFromSymbolTable(const GlobalValue *GV) {
  if (!GV->hasLinkOnceODRLinkage())
    return false;
  if (GV->hasUnnamedAddr())
    return true;
  // If it is a non constant variable, it needs to be uniqued across shared
  // objects.
  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
    if (!Var->isConstant())
      return false;
  }
  // An alias can point to a variable. We could try to resolve the alias to
  // decide, but for now just don't hide them.
  if (isa<GlobalAlias>(GV))
    return false;
  GlobalStatus GS;
  if (GlobalStatus::analyzeGlobal(GV, GS))
    return false;
  return !GS.IsCompared;
}
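
// As an illustrative example, a global such as
//
//   @str = linkonce_odr unnamed_addr constant [4 x i8] c"str\00"
//
// can be omitted: it is linkonce_odr and unnamed_addr, so no correct program
// may depend on its address, and a copy from any other translation unit is
// interchangeable with this one.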