#include "PPCTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/CostTable.h"
+#include "llvm/Target/TargetLowering.h"
using namespace llvm;
// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializePPCTTIPass(PassRegistry &);
}
namespace {
-class PPCTTI : public ImmutablePass, public TargetTransformInfo {
+class PPCTTI final : public ImmutablePass, public TargetTransformInfo {
const PPCTargetMachine *TM;
const PPCSubtarget *ST;
const PPCTargetLowering *TLI;
  PPCTTI(const PPCTargetMachine *TM)
      : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializePPCTTIPass(*PassRegistry::getPassRegistry());
}
- virtual void initializePass() {
+ virtual void initializePass() override {
pushTTIStack(this);
}
- virtual void finalizePass() {
- popTTIStack();
- }
-
- virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
TargetTransformInfo::getAnalysisUsage(AU);
}
static char ID;
/// Provide necessary pointer adjustments for the two base classes.
- virtual void *getAdjustedAnalysisPointer(const void *ID) {
+ virtual void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &TargetTransformInfo::ID)
return (TargetTransformInfo*)this;
    return this;
  }
/// \name Scalar TTI Implementations
/// @{
- virtual PopcntSupportKind getPopcntSupport(unsigned TyWidth) const;
+ virtual PopcntSupportKind
+ getPopcntSupport(unsigned TyWidth) const override;
+ virtual void getUnrollingPreferences(
+ Loop *L, UnrollingPreferences &UP) const override;
/// @}
/// \name Vector TTI Implementations
/// @{
- virtual unsigned getNumberOfRegisters(bool Vector) const;
- virtual unsigned getRegisterBitWidth(bool Vector) const;
- virtual unsigned getMaximumUnrollFactor() const;
- virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;
+ virtual unsigned getNumberOfRegisters(bool Vector) const override;
+ virtual unsigned getRegisterBitWidth(bool Vector) const override;
+ virtual unsigned getMaximumUnrollFactor() const override;
+ virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
+ OperandValueKind,
+ OperandValueKind) const override;
virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
- int Index, Type *SubTp) const;
+ int Index, Type *SubTp) const override;
virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
- Type *Src) const;
+ Type *Src) const override;
virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy) const;
+ Type *CondTy) const override;
virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
- unsigned Index) const;
+ unsigned Index) const override;
virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
- unsigned AddressSpace) const;
+ unsigned AddressSpace) const override;
/// @}
};
PPCTTI::PopcntSupportKind PPCTTI::getPopcntSupport(unsigned TyWidth) const {
assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
- // FIXME: PPC currently does not have custom popcnt lowering even though
- // there is hardware support. Once this is fixed, update this function
- // to reflect the real capabilities of the hardware.
+ if (ST->hasPOPCNTD() && TyWidth <= 64)
+ return PSK_FastHardware;
return PSK_Software;
}
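// Illustration only, not part of the patch: a transform that wants to form
// a population-count intrinsic (e.g. the loop-idiom recognizer) consults
// this hook first, roughly as:
//
//   if (TTI.getPopcntSupport(BitSize) ==
//       TargetTransformInfo::PSK_FastHardware)
//     ...form a call to @llvm.ctpop...
//
// With the change above, POPCNTD-capable subtargets now report fast
// hardware support for widths up to 64 bits instead of PSK_Software.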
+void PPCTTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
+ if (ST->getDarwinDirective() == PPC::DIR_A2) {
+ // The A2 is in-order with a deep pipeline, and concatenation unrolling
+ // helps expose latency-hiding opportunities to the instruction scheduler.
+ UP.Partial = UP.Runtime = true;
+ }
+}
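// Illustration only, not part of the patch: the generic loop unroller
// seeds an UnrollingPreferences struct with its command-line defaults and
// then lets the target override it, roughly as:
//
//   TargetTransformInfo::UnrollingPreferences UP;
//   UP.Partial = UP.Runtime = false;
//   TTI.getUnrollingPreferences(L, UP);
//   // On the A2, both flags are now true, enabling partial and runtime
//   // unrolling without -unroll-allow-partial / -unroll-runtime.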
+
unsigned PPCTTI::getNumberOfRegisters(bool Vector) const {
if (Vector && !ST->hasAltivec())
return 0;
- return 32;
+ return ST->hasVSX() ? 64 : 32;
}
unsigned PPCTTI::getRegisterBitWidth(bool Vector) const {
  if (Vector)
    return ST->hasAltivec() ? 128 : 0;
  return ST->isPPC64() ? 64 : 32;
}
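// Illustration only, not part of the patch: the loop vectorizer derives
// its widest vectorization factor and register-pressure limits from these
// two hooks, roughly as:
//
//   unsigned NumVecRegs = TTI.getNumberOfRegisters(/*Vector=*/true);
//   unsigned WidestBits = TTI.getRegisterBitWidth(/*Vector=*/true);
//
// so an Altivec subtarget advertises 32 (64 with VSX) 128-bit vector
// registers, while a subtarget without Altivec advertises 0, which keeps
// the vectorizer away entirely.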
-unsigned PPCTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const {
+unsigned PPCTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
+ OperandValueKind Op1Info,
+ OperandValueKind Op2Info) const {
assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");
// Fallback to the default implementation.
- return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty);
+ return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
+ Op2Info);
}
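// Illustration only, not part of the patch: the operand-value kinds let
// callers tell the target when an operand is a (splat) constant so that,
// e.g., shifts by constants can be priced more cheaply. A caller costing
// a vector shift by a uniform constant would ask roughly:
//
//   unsigned C = TTI.getArithmeticInstrCost(
//       Instruction::LShr, VecTy,
//       TargetTransformInfo::OK_AnyValue,
//       TargetTransformInfo::OK_UniformConstantValue);
//
// PPC currently just forwards both kinds to the base implementation.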
unsigned PPCTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
assert(Val->isVectorTy() && "This must be a vector type");
- const unsigned Awful = 1000;
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
- // Vector element insert/extract with Altivec is very expensive.
- // Until VSX is available, avoid vectorizing loops that require
- // these operations.
- if (Opcode == ISD::EXTRACT_VECTOR_ELT ||
- Opcode == ISD::INSERT_VECTOR_ELT)
- return Awful;
+ if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
+ // Double-precision scalars are already located in index #0.
+ if (Index == 0)
+ return 0;
- // We don't vectorize SREM/UREM so well. Constrain the vectorizer
- // for those as well.
- if (Opcode == ISD::SREM || Opcode == ISD::UREM)
- return Awful;
+ return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
+ }
- // VSELECT is not yet implemented, leading to use of insert/extract
- // and ISEL, hence not a good idea.
- if (Opcode == ISD::VSELECT)
- return Awful;
+ // Estimated cost of a load-hit-store delay. This was obtained
+ // experimentally as a minimum needed to prevent unprofitable
+ // vectorization for the paq8p benchmark. It may need to be
+ // raised further if other unprofitable cases remain.
+ unsigned LHSPenalty = 12;
+
+ // Vector element insert/extract with Altivec is very expensive,
+ // because they require store and reload with the attendant
+ // processor stall for load-hit-store. Until VSX is available,
+ // these need to be estimated as very costly.
+ if (ISD == ISD::EXTRACT_VECTOR_ELT ||
+ ISD == ISD::INSERT_VECTOR_ELT)
+ return LHSPenalty +
+ TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}
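// Illustration only, not part of the patch: a worked example of the costs
// above. On an Altivec-only subtarget,
//
//   Type *V4i32 = VectorType::get(Type::getInt32Ty(Ctx), 4);
//   TTI.getVectorInstrCost(Instruction::ExtractElement, V4i32, 1);
//
// returns LHSPenalty (12) plus the base legalization cost, modeling the
// store/reload sequence, while on a VSX subtarget extracting element #0
// of a <2 x double> returns 0 because the scalar already occupies the
// first vector lane.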
unsigned PPCTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Legalize the type; each resulting legal load/store unit costs 1.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  unsigned Cost = LT.first * 1;
+ // FIXME: Update this for VSX loads/stores that support unaligned access.
+
// PPC in general does not support unaligned loads and stores. They'll need
// to be decomposed based on the alignment factor.
unsigned SrcBytes = LT.second.getStoreSize();
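// Illustration only, not part of the patch: the decomposition that follows
// prices an unaligned access as roughly SrcBytes/Alignment naturally
// aligned pieces. For example, on a pre-VSX subtarget,
//
//   Type *V4i32 = VectorType::get(Type::getInt32Ty(Ctx), 4);
//   TTI.getMemoryOpCost(Instruction::Load, V4i32,
//                       /*Alignment=*/4, /*AddressSpace=*/0);
//
// would cost about four times an aligned 16-byte load.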