//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/ADT/Optional.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"
#include <functional>

namespace llvm {

class Function;
class GlobalValue;
class Loop;
class PreservedAnalyses;
class Type;
class User;
class Value;

/// \brief Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  MemIntrinsicInfo()
      : ReadMem(false), WriteMem(false), IsSimple(false), MatchingId(0),
        NumMemRefs(0), PtrVal(nullptr) {}
  bool ReadMem;
  bool WriteMem;

  /// True only if this memory operation is non-volatile, non-atomic, and
  /// unordered. (See LoadInst/StoreInst for details on each.)
  bool IsSimple;

  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId;
  int NumMemRefs;
  Value *PtrVal;
};

/// \brief This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  /// \brief Construct a TTI object using a type implementing the \c Concept
  /// API.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// \brief Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out-of-line.
  ~TargetTransformInfo();

  /// \brief Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// \brief Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs, which
  /// works better with simple integral values. Thus this enum only provides
  /// constants. Also note that the returned costs are signed integers to
  /// make it natural to add, subtract, and test with zero (a common boundary
  /// condition). It is not expected that 2^32 is a realistic cost to be
  /// modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of
  /// code-size cost and execution cost. A free instruction is typically one
  /// that folds into another instruction. For example, reg-to-reg moves can
  /// often be skipped by renaming the registers in the CPU, but they still
  /// are encoded and thus wouldn't be considered 'free' here.
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };

  /// \brief Estimate the cost of a specific operation when lowered.
  ///
  /// Note that this is designed to work on an arbitrary synthetic opcode, and
  /// thus work for hypothetical queries before an instruction has even been
  /// formed. However, this does *not* work for GEPs, and must not be called
  /// for a GEP instruction. Instead, use the dedicated getGEPCost interface,
  /// as analyzing a GEP's cost requires more information.
  ///
  /// Typically only the result type is required, and the operand type can be
  /// omitted. However, if the opcode is one of the cast instructions, the
  /// operand type is required.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;

  /// \brief Estimate the cost of a GEP operation when lowered.
  ///
  /// The contract for this function is the same as \c getOperationCost except
  /// that it supports an interface that provides extra information specific to
  /// the GEP operation.
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) const;

  /// \brief Estimate the cost of a function call when lowered.
  ///
  /// The contract for this is the same as \c getOperationCost except that it
  /// supports an interface that provides extra information specific to call
  /// instructions.
  ///
  /// This is the most basic query for estimating call cost: it only knows the
  /// function type and (potentially) the number of arguments at the call site.
  /// The latter is only interesting for varargs function types.
  int getCallCost(FunctionType *FTy, int NumArgs = -1) const;

  /// \brief Estimate the cost of calling a specific function when lowered.
  ///
  /// This overload adds the ability to reason about the particular function
  /// being called in the event it is a library call with special lowering.
  int getCallCost(const Function *F, int NumArgs = -1) const;

  /// \brief Estimate the cost of calling a specific function when lowered.
  ///
  /// This overload allows specifying a set of candidate argument values.
  int getCallCost(const Function *F, ArrayRef<const Value *> Arguments) const;

  /// \brief Estimate the cost of an intrinsic when lowered.
  ///
  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<Type *> ParamTys) const;

  /// \brief Estimate the cost of an intrinsic when lowered.
  ///
  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<const Value *> Arguments) const;

  /// \brief Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered. It has two primary advantages over the \c getOperationCost and
  /// \c getGEPCost above, and one significant disadvantage: it can only be
  /// used when the IR construct has already been formed.
  ///
  /// The advantages are that it can inspect the SSA use graph to reason more
  /// accurately about the cost. For example, all-constant-GEPs can often be
  /// folded into a load or other instruction, but if they are used in some
  /// other context they may not be folded. This routine can distinguish such
  /// cases.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  int getUserCost(const User *U) const;
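
  // Illustrative only: a sketch of how a client pass might use getUserCost to
  // keep a transformation within a budget. `TTI`, `BB`, and `Budget` are
  // hypothetical names, not part of this interface.
  //
  // \code
  //   int Cost = 0;
  //   for (const Instruction &I : BB)
  //     Cost += TTI.getUserCost(&I);
  //   bool Profitable = Cost <= Budget * TargetTransformInfo::TCC_Basic;
  // \endcode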

  /// \brief Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to
  /// conditional branches.
  bool hasBranchDivergence() const;

  /// \brief Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for
  /// the target-independent DivergenceAnalysis. DivergenceAnalysis first
  /// builds the dependency graph, and then runs the reachability algorithm
  /// starting with the sources of divergence.
  bool isSourceOfDivergence(const Value *V) const;

  /// \brief Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction, or equivalent, in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately, as a call is a single small instruction but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getUserCost values returned by this API, and the expectation is that
    /// the unrolled loop's instructions when run through that interface should
    /// not exceed this cost. However, this is only an estimate. Also, specific
    /// loops may be unrolled even with a cost above this threshold if deemed
    /// profitable. Set this to UINT_MAX to disable the loop body cost
    /// restriction.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop below its
    /// expected dynamic cost while rolled by this percentage, apply a discount
    /// (below) to its unrolled cost.
    unsigned PercentDynamicCostSavedThreshold;
    /// The discount applied to the unrolled cost when the *dynamic* cost
    /// savings of unrolling exceed the \c PercentDynamicCostSavedThreshold.
    unsigned DynamicCostSavingsDiscount;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size, like
    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current
    /// cost threshold and other factors.
    unsigned Count;
    /// The maximum unrolling factor. The unrolling factor may be selected
    /// using the appropriate cost threshold, but may not exceed this number
    /// (set to UINT_MAX to disable). This does not apply in cases where the
    /// loop is being fully unrolled.
    unsigned MaxCount;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow emitting expensive instructions (such as divisions) when
    /// computing the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
  };

  /// \brief Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
  void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;
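
  // A sketch of how a target's implementation might adjust the defaults it is
  // handed; `MyTTIImpl` and the concrete numbers are hypothetical, for
  // illustration only.
  //
  // \code
  //   void MyTTIImpl::getUnrollingPreferences(Loop *L,
  //                                           UnrollingPreferences &UP) {
  //     UP.Partial = true;         // This target tolerates larger bodies.
  //     UP.PartialThreshold = 200; // But caps the partially unrolled cost.
  //   }
  // \endcode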

  /// @}

  /// \name Scalar Target Information
  /// @{

  /// \brief Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. HW
  /// support is considered "Fast" if it can outperform, or is on a par with,
  /// the SW implementation when the population is sparse; otherwise, it is
  /// considered "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };
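
  // For example (a sketch, not a prescribed pattern), a transform that
  // rewrites a bit-counting loop into a population-count intrinsic might
  // guard itself on this query; `TTI` is an available TargetTransformInfo.
  //
  // \code
  //   if (TTI.getPopcntSupport(32) == TargetTransformInfo::PSK_FastHardware) {
  //     // Emit a call to the llvm.ctpop.i32 intrinsic here.
  //   }
  // \endcode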

  /// \brief Return true if the specified immediate is a legal add immediate,
  /// that is, the target has add instructions which can add a register with
  /// the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// \brief Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// \brief Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0) const;
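
  // Conceptually, the addressing mode being queried is
  //   BaseGV + BaseOffset + (HasBaseReg ? BaseReg : 0) + Scale * ScaleReg.
  // A sketch of testing a reg+reg mode (assumes `Int32Ty` is an available
  // 32-bit integer type):
  //
  // \code
  //   bool LegalRegReg =
  //       TTI.isLegalAddressingMode(Int32Ty, /*BaseGV=*/nullptr,
  //                                 /*BaseOffset=*/0, /*HasBaseReg=*/true,
  //                                 /*Scale=*/1);
  // \endcode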

  /// \brief Return true if the target supports masked load/store.
  /// AVX2 and AVX-512 targets allow masks for consecutive load and store for
  /// 32 and 64 bit elements.
  bool isLegalMaskedStore(Type *DataType) const;
  bool isLegalMaskedLoad(Type *DataType) const;

  /// \brief Return true if the target supports masked gather/scatter.
  /// AVX-512 fully supports gather and scatter for vectors with 32 and 64
  /// bit scalar types.
  bool isLegalMaskedScatter(Type *DataType) const;
  bool isLegalMaskedGather(Type *DataType) const;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale,
                           unsigned AddrSpace = 0) const;

  /// \brief Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// \brief Return true if it is profitable to hoist an instruction in the
  /// then/else blocks of an if to before the if.
  bool isProfitableToHoist(Instruction *I) const;

  /// \brief Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// \brief Returns the target's jmp_buf alignment in bytes.
  unsigned getJumpBufAlignment() const;

  /// \brief Returns the target's jmp_buf size in bytes.
  unsigned getJumpBufSize() const;

  /// \brief Return true if switches should be turned into lookup tables for
  /// the target.
  bool shouldBuildLookupTables() const;

  /// \brief Don't restrict interleaved unrolling to small loops.
  bool enableAggressiveInterleaving(bool LoopHasReductions) const;

  /// \brief Enable matching of interleaved access groups.
  bool enableInterleavedAccessVectorization() const;

  /// \brief Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// \brief Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// \brief Return the expected cost of supporting the floating point
  /// operation of the specified type.
  int getFPOpCost(Type *Ty) const;

  /// \brief Return the expected cost of materializing the given integer
  /// immediate of the specified type.
  int getIntImmCost(const APInt &Imm, Type *Ty) const;

  /// \brief Return the expected cost of materializing the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                    Type *Ty) const;
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty) const;

  /// @}

  /// \name Vector Target Information
  /// @{

  /// \brief The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,       ///< Broadcast element 0 to all other elements.
    SK_Reverse,         ///< Reverse the order of the vector.
    SK_Alternate,       ///< Choose alternate elements from vector.
    SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector ///< ExtractSubvector. Index indicates start offset.
  };

  /// \brief Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is a uniform constant.
    OK_NonUniformConstantValue // Operand is a non-uniform constant value.
  };

  /// \brief Additional properties of an operand's values.
  enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };

  /// \return The number of scalar or vector registers that the target has.
  /// If 'Vector' is true, it returns the number of vector registers. If it is
  /// set to false, it returns the number of scalar registers.
  unsigned getNumberOfRegisters(bool Vector) const;

  /// \return The width of the largest scalar or vector register type.
  unsigned getRegisterBitWidth(bool Vector) const;

  /// \return The maximum interleave factor that any transform should try to
  /// perform for this target. This number depends on the level of parallelism
  /// and the number of execution units in the CPU.
  unsigned getMaxInterleaveFactor(unsigned VF) const;

  /// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
      OperandValueKind Opd2Info = OK_AnyValue,
      OperandValueProperties Opd1PropInfo = OP_None,
      OperandValueProperties Opd2PropInfo = OP_None) const;

  /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
  /// The index and subtype parameters are used by the subvector insertion and
  /// extraction shuffle kinds.
  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
                     Type *SubTp = nullptr) const;
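
  // For example (illustrative), the cost of inserting a 4-wide subvector at
  // element 8 of a 16-wide vector could be queried as below; `V16Ty` and
  // `V4Ty` are assumed to be the corresponding vector types.
  //
  // \code
  //   int Cost = TTI.getShuffleCost(TargetTransformInfo::SK_InsertSubvector,
  //                                 V16Ty, /*Index=*/8, /*SubTp=*/V4Ty);
  // \endcode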

  /// \return The expected cost of cast instructions, such as bitcast, trunc,
  /// ZExt, etc.
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const;

  /// \return The expected cost of control-flow related instructions, such as
  /// Phi, Ret, Br.
  int getCFInstrCost(unsigned Opcode) const;

  /// \returns The expected cost of compare and select instructions.
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                         Type *CondTy = nullptr) const;

  /// \return The expected cost of vector Insert and Extract.
  /// Use -1 to indicate that there is no information on the index value.
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index = -1) const;

  /// \return The cost of Load and Store instructions.
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace) const;

  /// \return The cost of masked Load and Store instructions.
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace) const;

  /// \return The cost of a Gather or Scatter operation.
  /// \p Opcode - the kind of memory access, Load or Store
  /// \p DataTy - a vector type of the data to be loaded or stored
  /// \p Ptr - pointer (or vector of pointers) - address(es) in memory
  /// \p VariableMask - true when the memory access is predicated with a mask
  ///                   that is not a compile-time constant
  /// \p Alignment - alignment of a single element
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                             bool VariableMask, unsigned Alignment) const;

  /// \return The cost of the interleaved memory operation.
  /// \p Opcode is the memory operation code
  /// \p VecTy is the vector type of the interleaved access
  /// \p Factor is the interleave factor
  /// \p Indices is the indices for interleaved load members (as an
  ///    interleaved load allows gaps)
  /// \p Alignment is the alignment of the memory operation
  /// \p AddressSpace is the address space of the pointer
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace) const;

  /// \brief Calculate the cost of performing a vector reduction.
  ///
  /// This is the cost of reducing the vector value of type \p Ty to a scalar
  /// value using the operation denoted by \p Opcode. The form of the reduction
  /// can either be a pairwise reduction or a reduction that splits the vector
  /// at every reduction level.
  ///
  /// Pairwise:
  ///  (v0, v1, v2, v3)
  ///  ((v0+v1), (v2+v3), undef, undef)
  /// Split:
  ///  (v0, v1, v2, v3)
  ///  ((v0+v2), (v1+v3), undef, undef)
  int getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm) const;

  /// \returns The cost of Intrinsic instructions. Types analysis only.
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Type *> Tys) const;

  /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args) const;

  /// \returns The cost of Call instructions.
  int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) const;

  /// \returns The number of pieces into which the provided type must be
  /// split during legalization. Zero is returned when the answer is unknown.
  unsigned getNumberOfParts(Type *Tp) const;

  /// \returns The cost of the address computation. For most targets this can
  /// be merged into the instruction indexing mode. Some targets might want to
  /// distinguish between address computation for memory operations on vector
  /// types and scalar types. Such targets should override this function.
  /// The 'IsComplex' parameter is a hint that the address computation is
  /// likely to involve multiple instructions and as such is unlikely to be
  /// merged into the address indexing mode.
  int getAddressComputationCost(Type *Ty, bool IsComplex = false) const;

  /// \returns The cost, if any, of keeping values of the given types alive
  /// over a call.
  ///
  /// Some types may require the use of register classes that do not have
  /// any callee-saved registers, and so would require a spill and fill.
  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;

  /// \returns True if the intrinsic is a supported memory intrinsic. Info
  /// will contain additional information - whether the intrinsic may write
  /// or read to memory, its volatility, and the pointer. Info is undefined
  /// if false is returned.
  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  /// \returns A value which is the result of the given memory intrinsic. New
  /// instructions may be created to extract the result from the given
  /// intrinsic memory operation. Returns nullptr if the target cannot create
  /// a result from the given intrinsic.
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) const;

  /// \returns True if the two functions have compatible attributes for
  /// inlining purposes.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// @}

private:
  /// \brief The abstract base class used to type erase specific TTI
  /// implementations.
  class Concept;

  /// \brief The template model for the base class which wraps a concrete
  /// implementation in a type erased interface.
  template <typename T> class Model;

  std::unique_ptr<Concept> TTIImpl;
};

class TargetTransformInfo::Concept {
public:
  virtual ~Concept() = 0;
  virtual const DataLayout &getDataLayout() const = 0;
  virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
  virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
                         ArrayRef<const Value *> Operands) = 0;
  virtual int getCallCost(FunctionType *FTy, int NumArgs) = 0;
  virtual int getCallCost(const Function *F, int NumArgs) = 0;
  virtual int getCallCost(const Function *F,
                          ArrayRef<const Value *> Arguments) = 0;
  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                               ArrayRef<Type *> ParamTys) = 0;
  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                               ArrayRef<const Value *> Arguments) = 0;
  virtual int getUserCost(const User *U) = 0;
  virtual bool hasBranchDivergence() = 0;
  virtual bool isSourceOfDivergence(const Value *V) = 0;
  virtual bool isLoweredToCall(const Function *F) = 0;
  virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
  virtual bool isLegalAddImmediate(int64_t Imm) = 0;
  virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale,
                                     unsigned AddrSpace) = 0;
  virtual bool isLegalMaskedStore(Type *DataType) = 0;
  virtual bool isLegalMaskedLoad(Type *DataType) = 0;
  virtual bool isLegalMaskedScatter(Type *DataType) = 0;
  virtual bool isLegalMaskedGather(Type *DataType) = 0;
  virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale, unsigned AddrSpace) = 0;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
  virtual bool isProfitableToHoist(Instruction *I) = 0;
  virtual bool isTypeLegal(Type *Ty) = 0;
  virtual unsigned getJumpBufAlignment() = 0;
  virtual unsigned getJumpBufSize() = 0;
  virtual bool shouldBuildLookupTables() = 0;
  virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
  virtual bool enableInterleavedAccessVectorization() = 0;
  virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
  virtual bool haveFastSqrt(Type *Ty) = 0;
  virtual int getFPOpCost(Type *Ty) = 0;
  virtual int getIntImmCost(const APInt &Imm, Type *Ty) = 0;
  virtual int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) = 0;
  virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                            Type *Ty) = 0;
  virtual unsigned getNumberOfRegisters(bool Vector) = 0;
  virtual unsigned getRegisterBitWidth(bool Vector) = 0;
  virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
  virtual int
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
                         OperandValueKind Opd2Info,
                         OperandValueProperties Opd1PropInfo,
                         OperandValueProperties Opd2PropInfo) = 0;
  virtual int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                             Type *SubTp) = 0;
  virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) = 0;
  virtual int getCFInstrCost(unsigned Opcode) = 0;
  virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                 Type *CondTy) = 0;
  virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
                                 unsigned Index) = 0;
  virtual int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                              unsigned AddressSpace) = 0;
  virtual int getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment,
                                    unsigned AddressSpace) = 0;
  virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                     Value *Ptr, bool VariableMask,
                                     unsigned Alignment) = 0;
  virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                         unsigned Factor,
                                         ArrayRef<unsigned> Indices,
                                         unsigned Alignment,
                                         unsigned AddressSpace) = 0;
  virtual int getReductionCost(unsigned Opcode, Type *Ty,
                               bool IsPairwiseForm) = 0;
  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                    ArrayRef<Type *> Tys) = 0;
  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                    ArrayRef<Value *> Args) = 0;
  virtual int getCallInstrCost(Function *F, Type *RetTy,
                               ArrayRef<Type *> Tys) = 0;
  virtual unsigned getNumberOfParts(Type *Tp) = 0;
  virtual int getAddressComputationCost(Type *Ty, bool IsComplex) = 0;
  virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                                  MemIntrinsicInfo &Info) = 0;
  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                   Type *ExpectedType) = 0;
  virtual bool areInlineCompatible(const Function *Caller,
                                   const Function *Callee) const = 0;
};

template <typename T>
class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
  T Impl;

public:
  Model(T Impl) : Impl(std::move(Impl)) {}
  ~Model() override {}

  const DataLayout &getDataLayout() const override {
    return Impl.getDataLayout();
  }

  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
    return Impl.getOperationCost(Opcode, Ty, OpTy);
  }
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) override {
    return Impl.getGEPCost(PointeeType, Ptr, Operands);
  }
  int getCallCost(FunctionType *FTy, int NumArgs) override {
    return Impl.getCallCost(FTy, NumArgs);
  }
  int getCallCost(const Function *F, int NumArgs) override {
    return Impl.getCallCost(F, NumArgs);
  }
  int getCallCost(const Function *F,
                  ArrayRef<const Value *> Arguments) override {
    return Impl.getCallCost(F, Arguments);
  }
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<Type *> ParamTys) override {
    return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
  }
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<const Value *> Arguments) override {
    return Impl.getIntrinsicCost(IID, RetTy, Arguments);
  }
  int getUserCost(const User *U) override { return Impl.getUserCost(U); }
  bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
  bool isSourceOfDivergence(const Value *V) override {
    return Impl.isSourceOfDivergence(V);
  }
  bool isLoweredToCall(const Function *F) override {
    return Impl.isLoweredToCall(F);
  }
  void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) override {
    return Impl.getUnrollingPreferences(L, UP);
  }
  bool isLegalAddImmediate(int64_t Imm) override {
    return Impl.isLegalAddImmediate(Imm);
  }
  bool isLegalICmpImmediate(int64_t Imm) override {
    return Impl.isLegalICmpImmediate(Imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) override {
    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                      Scale, AddrSpace);
  }
  bool isLegalMaskedStore(Type *DataType) override {
    return Impl.isLegalMaskedStore(DataType);
  }
  bool isLegalMaskedLoad(Type *DataType) override {
    return Impl.isLegalMaskedLoad(DataType);
  }
  bool isLegalMaskedScatter(Type *DataType) override {
    return Impl.isLegalMaskedScatter(DataType);
  }
  bool isLegalMaskedGather(Type *DataType) override {
    return Impl.isLegalMaskedGather(DataType);
  }
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale,
                           unsigned AddrSpace) override {
    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
                                     Scale, AddrSpace);
  }
  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
    return Impl.isTruncateFree(Ty1, Ty2);
  }
  bool isProfitableToHoist(Instruction *I) override {
    return Impl.isProfitableToHoist(I);
  }
  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
  unsigned getJumpBufAlignment() override { return Impl.getJumpBufAlignment(); }
  unsigned getJumpBufSize() override { return Impl.getJumpBufSize(); }
  bool shouldBuildLookupTables() override {
    return Impl.shouldBuildLookupTables();
  }
  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
    return Impl.enableAggressiveInterleaving(LoopHasReductions);
  }
  bool enableInterleavedAccessVectorization() override {
    return Impl.enableInterleavedAccessVectorization();
  }
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
    return Impl.getPopcntSupport(IntTyWidthInBit);
  }
  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }

  int getFPOpCost(Type *Ty) override { return Impl.getFPOpCost(Ty); }

  int getIntImmCost(const APInt &Imm, Type *Ty) override {
    return Impl.getIntImmCost(Imm, Ty);
  }
  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                    Type *Ty) override {
    return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
  }
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty) override {
    return Impl.getIntImmCost(IID, Idx, Imm, Ty);
  }
  unsigned getNumberOfRegisters(bool Vector) override {
    return Impl.getNumberOfRegisters(Vector);
  }
  unsigned getRegisterBitWidth(bool Vector) override {
    return Impl.getRegisterBitWidth(Vector);
  }
  unsigned getMaxInterleaveFactor(unsigned VF) override {
    return Impl.getMaxInterleaveFactor(VF);
  }
  int
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
                         OperandValueKind Opd2Info,
                         OperandValueProperties Opd1PropInfo,
                         OperandValueProperties Opd2PropInfo) override {
    return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
  }
  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                     Type *SubTp) override {
    return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
  }
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) override {
    return Impl.getCastInstrCost(Opcode, Dst, Src);
  }
  int getCFInstrCost(unsigned Opcode) override {
    return Impl.getCFInstrCost(Opcode);
  }
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) override {
    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy);
  }
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
    return Impl.getVectorInstrCost(Opcode, Val, Index);
  }
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace) override {
    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  }
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace) override {
    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  }
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                             Value *Ptr, bool VariableMask,
                             unsigned Alignment) override {
    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
  }
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace) override {
    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
  }
  int getReductionCost(unsigned Opcode, Type *Ty,
                       bool IsPairwiseForm) override {
    return Impl.getReductionCost(Opcode, Ty, IsPairwiseForm);
  }
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Type *> Tys) override {
    return Impl.getIntrinsicInstrCost(ID, RetTy, Tys);
  }
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args) override {
    return Impl.getIntrinsicInstrCost(ID, RetTy, Args);
  }
  int getCallInstrCost(Function *F, Type *RetTy,
                       ArrayRef<Type *> Tys) override {
    return Impl.getCallInstrCost(F, RetTy, Tys);
  }
  unsigned getNumberOfParts(Type *Tp) override {
    return Impl.getNumberOfParts(Tp);
  }
  int getAddressComputationCost(Type *Ty, bool IsComplex) override {
    return Impl.getAddressComputationCost(Ty, IsComplex);
  }
  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
    return Impl.getCostOfKeepingLiveOverCall(Tys);
  }
  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) override {
    return Impl.getTgtMemIntrinsic(Inst, Info);
  }
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) override {
    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
  }
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return Impl.areInlineCompatible(Caller, Callee);
  }
};

template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}
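
// Illustrative sketches of the two construction paths declared above. The
// names `M`, `MyTargetTTIImpl`, `TM`, and `F` are hypothetical.
//
// \code
//   // Baseline, DataLayout-only TTI:
//   TargetTransformInfo BaselineTTI(M.getDataLayout());
//
//   // Target-specific TTI wrapping a concrete Impl via the template ctor:
//   TargetTransformInfo TargetTTI(MyTargetTTIImpl(TM, F));
// \endcode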

/// \brief Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget-specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
class TargetIRAnalysis {
public:
  typedef TargetTransformInfo Result;

  /// \brief Opaque, unique identifier for this analysis pass.
  static void *ID() { return (void *)&PassID; }

  /// \brief Provide access to a name for this pass for debugging purposes.
  static StringRef name() { return "TargetIRAnalysis"; }

  /// \brief Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// \brief Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
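
  // A sketch of how a target might provide this callback (the names
  // `MyTargetMachine` and `MyTTIImpl` are hypothetical):
  //
  // \code
  //   TargetIRAnalysis MyTargetMachine::getTargetIRAnalysis() {
  //     return TargetIRAnalysis(
  //         [this](const Function &F) {
  //           return TargetTransformInfo(MyTTIImpl(this, F));
  //         });
  //   }
  // \endcode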

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

  Result run(const Function &F);

private:
  static char PassID;

  /// \brief The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// \brief Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};

/// \brief Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  Optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// \brief We must provide a default constructor for the pass but it should
  /// never be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};
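
// A sketch of typical use from a legacy pass; assumes the pass declared
// `AU.addRequired<TargetTransformInfoWrapperPass>()` in its getAnalysisUsage,
// and that `F`, `Callee`, and `Cost` are hypothetical names in that pass.
//
// \code
//   const TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//   if (TTI.isLoweredToCall(&Callee))
//     Cost += TargetTransformInfo::TCC_Basic; // Account for a real call.
// \endcode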

/// \brief Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

} // End llvm namespace

#endif