#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
-#include "llvm/InstVisitor.h"
-#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
friend class InstVisitor<CallAnalyzer, bool>;
// DataLayout if available, or null.
- const DataLayout *const TD;
+ const DataLayout *const DL;
/// The TargetTransformInfo available for this compilation.
const TargetTransformInfo &TTI;
bool visitUnreachableInst(UnreachableInst &I);
public:
- CallAnalyzer(const DataLayout *TD, const TargetTransformInfo &TTI,
+ CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
Function &Callee, int Threshold)
- : TD(TD), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
+ : DL(DL), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
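+/// For example, a GEP with indices (0, 1) into struct { i32, i64 } yields an
+/// offset of 8 under a typical 64-bit data layout (4 bytes of i32 plus 4
+/// bytes of padding to align the i64 field).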
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
- if (!TD)
+ if (!DL)
return false;
- unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ unsigned IntPtrWidth = DL->getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
unsigned ElementIdx = OpC->getZExtValue();
- const StructLayout *SL = TD->getStructLayout(STy);
+ const StructLayout *SL = DL->getStructLayout(STy);
Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
continue;
}
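+ // Otherwise this is a sequential index; scale it by the element size.
+ // E.g. stepping index 3 through i32 elements adds 3 * 4 = 12 bytes,
+ // assuming a data layout where i32 occupies 4 bytes.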
- APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
+ APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
}
return true;
}
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
- // FIXME: Check whether inlining will turn a dynamic alloca into a static
+ // Check whether inlining will turn a dynamic alloca into a static
// alloca, and handle that case.
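+ // If the array size simplifies to a constant after inlining (e.g. the
+ // caller passes a literal element count), the alloca becomes static in
+ // the inlined body; account for its size before falling back to the
+ // generic alloca handling.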
+ if (I.isArrayAllocation()) {
+ if (Constant *Size = SimplifiedValues.lookup(I.getArraySize())) {
+ ConstantInt *AllocSize = dyn_cast<ConstantInt>(Size);
+ assert(AllocSize && "Allocation size not a constant int?");
+ Type *Ty = I.getAllocatedType();
+ AllocatedSize += Ty->getPrimitiveSizeInBits() * AllocSize->getZExtValue();
+ return Base::visitAlloca(I);
+ }
+ }
// Accumulate the allocated size.
if (I.isStaticAlloca()) {
Type *Ty = I.getAllocatedType();
- AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
+ AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
Ty->getPrimitiveSizeInBits());
}
// Try to fold GEPs of constant-offset call site argument pointers. This
// requires target data and inbounds GEPs.
- if (TD && I.isInBounds()) {
+ if (DL && I.isInBounds()) {
// Check if we have a base + offset for the pointer.
Value *Ptr = I.getPointerOperand();
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
}
bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
// Propagate constants through ptrtoint.
Constant *COp = dyn_cast<Constant>(I.getOperand(0));
if (!COp)
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
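+ // E.g. a ptrtoint to i64 on a target with 64-bit pointers keeps the
+ // base/offset pair alive, while a ptrtoint to i32 would drop bits and is
+ // not tracked.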
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
- if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
+ if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
if (BaseAndOffset.first)
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
- // Propagate constants through ptrtoint.
+ // Propagate constants through inttoptr.
Constant *COp = dyn_cast<Constant>(I.getOperand(0));
if (!COp)
// modifications provided the integer is not too large.
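+ // E.g. an i32 source on a target with 64-bit pointers is safe to track,
+ // while an i128 source could carry bits the pointer cannot represent.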
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
- if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
+ if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
ConstantOffsetPtrs[&I] = BaseAndOffset;
COp = SimplifiedValues.lookup(Operand);
if (COp)
if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
- COp, TD)) {
+ COp, DL)) {
SimplifiedValues[&I] = C;
return true;
}
// a common base.
Value *LHSBase, *RHSBase;
APInt LHSOffset, RHSOffset;
- llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
+ std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
if (LHSBase) {
- llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
+ std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
if (RHSBase && LHSBase == RHSBase) {
// We have common bases, fold the icmp to a constant based on the
// offsets.
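+ // Roughly, "icmp pred (base + L), (base + R)" becomes "icmp pred L, R"
+ // evaluated on the two constant offsets; e.g. %p+4 == %p+8 folds to
+ // false.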
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
Value *LHSBase, *RHSBase;
APInt LHSOffset, RHSOffset;
- llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
+ std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
if (LHSBase) {
- llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
+ std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
if (RHSBase && LHSBase == RHSBase) {
// We have common bases, fold the subtract to a constant based on the
// offsets.
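+ // E.g. "(%p + 8) - (%p + 4)" folds to the constant 4 without needing
+ // to know anything about %p itself.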
if (!isa<Constant>(RHS))
if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
RHS = SimpleRHS;
- Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
+ Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
SimplifiedValues[&I] = C;
return true;
return false;
}
if (CS.isCall() &&
- cast<CallInst>(CS.getInstruction())->hasFnAttr(Attribute::NoDuplicate))
+ cast<CallInst>(CS.getInstruction())->cannotDuplicate())
ContainsNoDuplicateCall = true;
if (Function *F = CS.getCalledFunction()) {
// during devirtualization and so we want to give it a hefty bonus for
// inlining, but cap that bonus in the event that inlining wouldn't pan
// out. Pretend to inline the function, with a custom threshold.
- CallAnalyzer CA(TD, TTI, *F, InlineConstants::IndirectCallThreshold);
+ CallAnalyzer CA(DL, TTI, *F, InlineConstants::IndirectCallThreshold);
if (CA.analyzeCall(CS)) {
// We were able to inline the indirect call! Subtract the cost from the
// bonus we want to apply, but don't go below zero.
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
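+/// E.g. given V = "gep inbounds i8* %p, i64 16", V is rewritten to %p and a
+/// pointer-width constant 16 is returned (assuming DataLayout is available;
+/// otherwise the function bails out with null).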
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
- if (!TD || !V->getType()->isPointerTy())
+ if (!DL || !V->getType()->isPointerTy())
return 0;
- unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ unsigned IntPtrWidth = DL->getPointerSizeInBits();
APInt Offset = APInt::getNullValue(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V));
- Type *IntPtrTy = TD->getIntPtrType(V->getContext());
+ Type *IntPtrTy = DL->getIntPtrType(V->getContext());
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
// Give out bonuses per argument, as the instructions setting them up will
// be gone after inlining.
for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
- if (TD && CS.isByValArgument(I)) {
+ if (DL && CS.isByValArgument(I)) {
// We approximate the number of loads and stores needed by dividing the
// size of the byval type by the target's pointer size.
PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
- unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
- unsigned PointerSize = TD->getPointerSizeInBits();
+ unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
+ unsigned PointerSize = DL->getPointerSizeInBits();
// Ceiling division.
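+ // E.g. a 12-byte (96-bit) byval type with 64-bit pointers needs
+ // (96 + 63) / 64 = 2 pointer-sized stores to copy it.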
unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
Function *Caller = CS.getInstruction()->getParent()->getParent();
// Check if the caller function is recursive itself.
- for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
- U != E; ++U) {
- CallSite Site(cast<Value>(*U));
+ for (User *U : Caller->users()) {
+ CallSite Site(U);
if (!Site)
continue;
Instruction *I = Site.getInstruction();
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
-#define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n"
+#define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
DEBUG_PRINT_STAT(NumConstantArgs);
DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
DEBUG_PRINT_STAT(NumAllocaArgs);
char InlineCostAnalysis::ID = 0;
-InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}
+InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID) {}
InlineCostAnalysis::~InlineCostAnalysis() {}
}
bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
- TD = getAnalysisIfAvailable<DataLayout>();
TTI = &getAnalysis<TargetTransformInfo>();
return false;
}
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");
- CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
+ CallAnalyzer CA(Callee->getParent()->getDataLayout(), *TTI, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
DEBUG(CA.dump());
F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
Attribute::ReturnsTwice);
for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
- // Disallow inlining of functions which contain an indirect branch.
- if (isa<IndirectBrInst>(BI->getTerminator()))
+ // Disallow inlining of functions which contain an indirect branch,
+ // unless the always_inline attribute is set.
+ // The attribute serves as an assertion that no local address,
+ // such as a block label, can escape the function.
+ // Revisit enabling inlining for functions with indirect branches
+ // when a more sophisticated escape/points-to analysis becomes available.
+ if (isa<IndirectBrInst>(BI->getTerminator()) &&
+ !F.hasFnAttribute(Attribute::AlwaysInline))
return false;
for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;