diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp
index 8e11fe1c9cf..98d4c8afc7b 100644
--- a/lib/CodeGen/Analysis.cpp
+++ b/lib/CodeGen/Analysis.cpp
@@ -81,27 +81,27 @@ unsigned llvm::ComputeLinearIndex(Type *Ty,
 /// If Offsets is non-null, it points to a vector to be filled in
 /// with the in-memory offsets of each of the individual values.
 ///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
-                           SmallVectorImpl<EVT> &ValueVTs,
+void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
   // Given a struct type, recursively traverse the elements.
   if (StructType *STy = dyn_cast<StructType>(Ty)) {
-    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
+    const StructLayout *SL = DL.getStructLayout(STy);
     for (StructType::element_iterator EB = STy->element_begin(),
                                       EI = EB,
                                       EE = STy->element_end();
          EI != EE; ++EI)
-      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
+      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                       StartingOffset + SL->getElementOffset(EI - EB));
     return;
   }
   // Given an array type, recursively traverse the elements.
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
+    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
     for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
-      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
+      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                       StartingOffset + i * EltSize);
     return;
   }
@@ -109,7 +109,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
   if (Ty->isVoidTy())
     return;
   // Base case: we can get an EVT for this LLVM IR type.
-  ValueVTs.push_back(TLI.getValueType(Ty));
+  ValueVTs.push_back(TLI.getValueType(DL, Ty));
   if (Offsets)
     Offsets->push_back(StartingOffset);
 }
@@ -233,7 +233,8 @@ static bool isNoopBitcast(Type *T1, Type *T2,
 static const Value *getNoopInput(const Value *V,
                                  SmallVectorImpl<unsigned> &ValLoc,
                                  unsigned &DataBits,
-                                 const TargetLoweringBase &TLI) {
+                                 const TargetLoweringBase &TLI,
+                                 const DataLayout &DL) {
   while (true) {
     // Try to look through V1; if V1 is not an instruction, it can't be looked
     // through.
@@ -255,16 +256,16 @@ static const Value *getNoopInput(const Value *V,
       // Make sure this isn't a truncating or extending cast.  We could
       // support this eventually, but don't bother for now.
       if (!isa<VectorType>(I->getType()) &&
-          TLI.getPointerTy().getSizeInBits() ==
-          cast<IntegerType>(Op->getType())->getBitWidth())
+          DL.getPointerSizeInBits() ==
+              cast<IntegerType>(Op->getType())->getBitWidth())
         NoopInput = Op;
     } else if (isa<PtrToIntInst>(I)) {
       // Look through ptrtoint.
       // Make sure this isn't a truncating or extending cast.  We could
       // support this eventually, but don't bother for now.
       if (!isa<VectorType>(I->getType()) &&
-          TLI.getPointerTy().getSizeInBits() ==
-          cast<IntegerType>(I->getType())->getBitWidth())
+          DL.getPointerSizeInBits() ==
+              cast<IntegerType>(I->getType())->getBitWidth())
         NoopInput = Op;
     } else if (isa<TruncInst>(I) &&
                TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
@@ -295,8 +296,8 @@ static const Value *getNoopInput(const Value *V,
     } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
       // Value may come from either the aggregate or the scalar
       ArrayRef<unsigned> InsertLoc = IVI->getIndices();
-      if (std::equal(InsertLoc.rbegin(), InsertLoc.rend(),
-                     ValLoc.rbegin())) {
+      if (ValLoc.size() >= InsertLoc.size() &&
+          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
         // The type being inserted is a nested sub-type of the aggregate; we
         // have to remove those initial indices to get the location we're
         // interested in for the operand.
@@ -331,14 +332,15 @@ static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                  SmallVectorImpl<unsigned> &RetIndices,
                                  SmallVectorImpl<unsigned> &CallIndices,
                                  bool AllowDifferingSizes,
-                                 const TargetLoweringBase &TLI) {
+                                 const TargetLoweringBase &TLI,
+                                 const DataLayout &DL) {
 
   // Trace the sub-value needed by the return value as far back up the graph as
   // possible, in the hope that it will intersect with the value produced by the
   // call. In the simple case with no "returned" attribute, the hope is actually
   // that we end up back at the tail call instruction itself.
   unsigned BitsRequired = UINT_MAX;
-  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI);
+  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);
 
   // If this slot in the value returned is undef, it doesn't matter what the
   // call puts there, it'll be fine.
@@ -350,7 +352,7 @@ static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
   // a "returned" attribute, the search will be blocked immediately and the loop
   // a Noop.
   unsigned BitsProvided = UINT_MAX;
-  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI);
+  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);
 
   // There's no hope if we can't actually trace them to (the same part of!) the
   // same value.
@@ -606,7 +608,8 @@ bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
     // Finally, we can check whether the value produced by the tail call at this
     // index is compatible with the value we return.
     if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
-                              AllowDifferingSizes, TLI))
+                              AllowDifferingSizes, TLI,
+                              F->getParent()->getDataLayout()))
       return false;
 
     CallEmpty  = !nextRealType(CallSubTypes, CallPath);
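
For context, a minimal call-site sketch of the new ComputeValueVTs signature
this diff introduces. It is not part of the patch: the helper name
collectReturnVTs is hypothetical, and the header paths assume the LLVM tree of
this era. The point it illustrates is that the DataLayout is now fetched from
the IR Module (F.getParent()->getDataLayout()) and threaded through explicitly,
instead of being reached through TLI.getDataLayout().

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/CodeGen/Analysis.h"
  #include "llvm/CodeGen/ValueTypes.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Target/TargetLowering.h"

  using namespace llvm;

  // Hypothetical helper: compute the legal EVTs (and in-memory offsets) that a
  // function's return type decomposes into, under the module's DataLayout.
  static void collectReturnVTs(const TargetLowering &TLI, const Function &F,
                               SmallVectorImpl<EVT> &ValueVTs,
                               SmallVectorImpl<uint64_t> &Offsets) {
    // The layout now comes from the IR module, not from the target.
    const DataLayout &DL = F.getParent()->getDataLayout();

    // Before this patch the call was:
    //   ComputeValueVTs(TLI, F.getReturnType(), ValueVTs, &Offsets);
    ComputeValueVTs(TLI, DL, F.getReturnType(), ValueVTs, &Offsets);
  }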