//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/ADT/Optional.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/DataTypes.h"
#include <functional>

namespace llvm {

class Function;
class GlobalValue;
class IntrinsicInst;
class LoadInst;
class Loop;
class SCEV;
class ScalarEvolution;
class StoreInst;
class SwitchInst;
class Type;
class User;
class Value;
class VectorType;

/// \brief Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  /// This is the pointer that the intrinsic is loading from or storing to.
  /// If this is non-null, then analysis/optimization passes can assume that
  /// this intrinsic is functionally equivalent to a load/store from this
  /// pointer.
  Value *PtrVal = nullptr;

  // Ordering for atomic operations.
  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;

  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId = 0;

  bool ReadMem = false;
  bool WriteMem = false;
  bool IsVolatile = false;

  bool isUnordered() const {
    return (Ordering == AtomicOrdering::NotAtomic ||
            Ordering == AtomicOrdering::Unordered) &&
           !IsVolatile;
  }
};
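
// Example: a target's TTI implementation typically fills this struct in from
// its getTgtMemIntrinsic hook (declared further down in this file). A minimal
// sketch; the intrinsic name and operand layout are hypothetical, not from any
// real target:
//
//   bool MyTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
//                                      MemIntrinsicInfo &Info) {
//     switch (Inst->getIntrinsicID()) {
//     case Intrinsic::my_target_load: // hypothetical load-like intrinsic
//       Info.PtrVal = Inst->getArgOperand(0); // the pointer operand
//       Info.ReadMem = true;                  // behaves like a load
//       Info.WriteMem = false;
//       Info.IsVolatile = false;
//       Info.MatchingId = 1; // pairs it with the matching store intrinsic
//       return true;
//     default:
//       return false;
//     }
//   }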

/// \brief This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  /// \brief Construct a TTI object using a type implementing the \c Concept
  /// API below.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// \brief Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out-of-line.
  ~TargetTransformInfo();

  /// \brief Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// \brief Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs, which works
  /// better as simple integral values. Thus this enum only provides constants.
  /// Also note that the returned costs are signed integers to make it natural
  /// to add, subtract, and test with zero (a common boundary condition). It is
  /// not expected that 2^32 is a realistic cost to be modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of code-size
  /// cost and execution cost. A free instruction is typically one that folds
  /// into another instruction. For example, reg-to-reg moves can often be
  /// skipped by renaming the registers in the CPU, but they still are encoded
  /// and thus wouldn't be considered 'free' here.
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };
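
  // Example: costs from this interface are plain integers that clients sum and
  // compare against a budget. An illustrative sketch; the budget below is
  // arbitrary, not an LLVM constant:
  //
  //   int Cost = 0;
  //   Cost += TargetTransformInfo::TCC_Basic;     // e.g. one 'add'
  //   Cost += 2 * TargetTransformInfo::TCC_Basic; // an op twice as expensive
  //   if (Cost > 4 * TargetTransformInfo::TCC_Basic)
  //     ; // too expensive for this (hypothetical) transformation's budget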

  /// \brief Estimate the cost of a specific operation when lowered.
  ///
  /// Note that this is designed to work on an arbitrary synthetic opcode, and
  /// thus work for hypothetical queries before an instruction has even been
  /// formed. However, this does *not* work for GEPs, and must not be called
  /// for a GEP instruction. Instead, use the dedicated getGEPCost interface as
  /// analyzing a GEP's cost requires more information.
  ///
  /// Typically only the result type is required, and the operand type can be
  /// omitted. However, if the opcode is one of the cast instructions, the
  /// operand type is required.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;

  /// \brief Estimate the cost of a GEP operation when lowered.
  ///
  /// The contract for this function is the same as \c getOperationCost except
  /// that it supports an interface that provides extra information specific to
  /// the GEP operation.
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) const;

  /// \brief Estimate the cost of an EXT operation when lowered.
  ///
  /// The contract for this function is the same as \c getOperationCost except
  /// that it supports an interface that provides extra information specific to
  /// the EXT operation.
  int getExtCost(const Instruction *I, const Value *Src) const;

  /// \brief Estimate the cost of a function call when lowered.
  ///
  /// The contract for this is the same as \c getOperationCost except that it
  /// supports an interface that provides extra information specific to call
  /// instructions.
  ///
  /// This is the most basic query for estimating call cost: it only knows the
  /// function type and (potentially) the number of arguments at the call site.
  /// The latter is only interesting for varargs function types.
  int getCallCost(FunctionType *FTy, int NumArgs = -1) const;

  /// \brief Estimate the cost of calling a specific function when lowered.
  ///
  /// This overload adds the ability to reason about the particular function
  /// being called in the event it is a library call with special lowering.
  int getCallCost(const Function *F, int NumArgs = -1) const;

  /// \brief Estimate the cost of calling a specific function when lowered.
  ///
  /// This overload allows specifying a set of candidate argument values.
  int getCallCost(const Function *F, ArrayRef<const Value *> Arguments) const;

  /// \returns A value by which our inlining threshold should be multiplied.
  /// This is primarily used to bump up the inlining threshold wholesale on
  /// targets where calls are unusually expensive.
  ///
  /// TODO: This is a rather blunt instrument. Perhaps altering the costs of
  /// individual classes of instructions would be better.
  unsigned getInliningThresholdMultiplier() const;

  /// \brief Estimate the cost of an intrinsic when lowered.
  ///
  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<Type *> ParamTys) const;

  /// \brief Estimate the cost of an intrinsic when lowered.
  ///
  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<const Value *> Arguments) const;

  /// \return The estimated number of case clusters when lowering \p SI.
  /// \p JTSize Set a jump table size only when \p SI is suitable for a jump
  /// table.
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) const;

  /// \brief Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered. It has two primary advantages over the \c getOperationCost and
  /// \c getGEPCost above, and one significant disadvantage: it can only be
  /// used when the IR construct has already been formed.
  ///
  /// The advantages are that it can inspect the SSA use graph to reason more
  /// accurately about the cost. For example, all-constant-GEPs can often be
  /// folded into a load or other instruction, but if they are used in some
  /// other context they may not be folded. This routine can distinguish such
  /// cases.
  ///
  /// \p Operands is a list of operands which can be a result of transformations
  /// of the current operands. The number of operands on the list must be equal
  /// to the number of current operands the IR user has. Their order on the
  /// list must be the same as the order of the current operands the IR user
  /// has.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  int getUserCost(const User *U, ArrayRef<const Value *> Operands) const;

  /// \brief This is a helper function which calls the two-argument getUserCost
  /// with \p Operands which are the current operands U has.
  int getUserCost(const User *U) const {
    SmallVector<const Value *, 4> Operands(U->value_op_begin(),
                                           U->value_op_end());
    return getUserCost(U, Operands);
  }
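
  // Example: estimating the cost of a whole basic block by summing user costs.
  // An illustrative sketch only; real clients typically add per-instruction
  // special cases on top of this:
  //
  //   int blockCost(const BasicBlock &BB, const TargetTransformInfo &TTI) {
  //     int Cost = 0;
  //     for (const Instruction &I : BB)
  //       Cost += TTI.getUserCost(&I);
  //     return Cost;
  //   }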

  /// \brief Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to conditional
  /// branches.
  bool hasBranchDivergence() const;

  /// \brief Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for
  /// the target-independent DivergenceAnalysis. DivergenceAnalysis first
  /// builds the dependency graph, and then runs the reachability algorithm
  /// starting with the sources of divergence.
  bool isSourceOfDivergence(const Value *V) const;

  /// \brief Returns true for the target-specific set of operations which
  /// produce a uniform result even when taking non-uniform arguments.
  bool isAlwaysUniform(const Value *V) const;

  /// Returns the address space ID for a target's 'flat' address space. Note
  /// this is not necessarily the same as addrspace(0), which LLVM sometimes
  /// refers to as the generic address space. The flat address space is a
  /// generic address space that can be used to access multiple segments of
  /// memory with different address spaces. Access of a memory location through
  /// a pointer with this address space is expected to be legal but slower
  /// compared to the same memory location accessed through a pointer with a
  /// different address space.
  ///
  /// This is for targets with different pointer representations which can
  /// be converted with the addrspacecast instruction. If a pointer is converted
  /// to this address space, optimizations should attempt to replace the access
  /// with the source address space.
  ///
  /// \returns ~0u if the target does not have such a flat address space to
  /// optimize away.
  unsigned getFlatAddressSpace() const;

  /// \brief Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction or equivalent in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately as a call is a single small instruction, but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  struct LSRCost {
    /// TODO: Some of these could be merged. Also, a lexical ordering
    /// isn't always optimal.
    unsigned Insns;
    unsigned NumRegs;
    unsigned AddRecCost;
    unsigned NumIVMuls;
    unsigned NumBaseAdds;
    unsigned ImmCost;
    unsigned SetupCost;
    unsigned ScaleCost;
  };

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getUserCost values returned by this API, and the expectation is that
    /// the unrolled loop's instructions when run through that interface should
    /// not exceed this cost. However, this is only an estimate. Also, specific
    /// loops may be unrolled even with a cost above this threshold if deemed
    /// profitable. Set this to UINT_MAX to disable the loop body cost
    /// estimation.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop, we will boost
    /// the Threshold by a certain percent to allow more aggressive complete
    /// unrolling. This value provides the maximum boost percentage that we
    /// can apply to Threshold (the value should be no less than 100).
    /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
    ///                                    MaxPercentThresholdBoost / 100)
    /// E.g. if complete unrolling reduces the loop execution time by 50%
    /// then we boost the threshold by the factor of 2x. If unrolling is not
    /// expected to reduce the running time, then we do not increase the
    /// threshold.
    unsigned MaxPercentThresholdBoost;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size, like
    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current cost
    /// threshold and other factors.
    unsigned Count;
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, the
    /// unrolling transformation will select a peeling factor based on profile
    /// information and other factors.
    unsigned PeelCount;
    /// Default unroll count for loops with run-time trip count.
    unsigned DefaultUnrollRuntimeCount;
    /// Set the maximum unrolling factor. The unrolling factor may be selected
    /// using the appropriate cost threshold, but may not exceed this number
    /// (set to UINT_MAX to disable). This does not apply in cases where the
    /// loop is being fully unrolled.
    unsigned MaxCount;
    /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
    /// applies even if full unrolling is selected. This allows a target to fall
    /// back to Partial unrolling if full unrolling is above FullUnrollMaxCount.
    unsigned FullUnrollMaxCount;
    // Represents number of instructions optimized when "back edge"
    // becomes "fall through" in unrolled loop.
    // For now we count a conditional branch on a backedge and a comparison
    // feeding it.
    unsigned BEInsns;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow generation of a loop remainder (extra iterations after unroll).
    bool AllowRemainder;
    /// Allow emitting expensive instructions (such as divisions) when computing
    /// the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
    /// Apply loop unroll on any kind of loop
    /// (mainly to loops that fail runtime unrolling).
    bool Force;
    /// Allow using trip count upper bound to unroll loops.
    bool UpperBound;
    /// Allow peeling off loop iterations for loops with low dynamic tripcount.
    bool AllowPeeling;
  };

  /// \brief Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
  void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                               UnrollingPreferences &UP) const;
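
  // Example: a target's TTI implementation can adjust the defaults it is
  // handed. A minimal sketch; MyTTIImpl and the numbers are illustrative, not
  // from any real target:
  //
  //   void MyTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
  //                                           TTI::UnrollingPreferences &UP) {
  //     UP.Partial = true; // allow partial unrolling
  //     UP.Runtime = true; // allow unrolling loops with run-time trip counts
  //     UP.MaxCount = 4;   // but never unroll by more than a factor of 4
  //   }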

  /// @}

  /// \name Scalar Target Information
  /// @{

  /// \brief Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. A HW
  /// support is considered as "Fast" if it can outperform, or is on a par
  /// with, the SW implementation when the population is sparse; otherwise, it
  /// is considered as "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };

  /// \brief Return true if the specified immediate is a legal add immediate,
  /// that is, the target has add instructions which can add a register with
  /// the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// \brief Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// \brief Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0) const;
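
  // The queried addressing mode is conceptually
  //   BaseGV + BaseOffset + (HasBaseReg ? BaseReg : 0) + Scale * IndexReg.
  // An illustrative query for a mode like [reg + 4*reg + 16], where Int32Ty
  // stands for a previously obtained i32 Type*:
  //
  //   bool Legal = TTI.isLegalAddressingMode(Int32Ty, /*BaseGV=*/nullptr,
  //                                          /*BaseOffset=*/16,
  //                                          /*HasBaseReg=*/true,
  //                                          /*Scale=*/4);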

  /// \brief Return true if the LSR cost of C1 is lower than the cost of C2.
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2) const;

  /// \brief Return true if the target supports masked load/store:
  /// AVX2 and AVX-512 targets allow masks for consecutive load and store.
  bool isLegalMaskedStore(Type *DataType) const;
  bool isLegalMaskedLoad(Type *DataType) const;

  /// \brief Return true if the target supports masked gather/scatter:
  /// AVX-512 fully supports gather and scatter for vectors with 32- and
  /// 64-bit scalar types.
  bool isLegalMaskedScatter(Type *DataType) const;
  bool isLegalMaskedGather(Type *DataType) const;

  /// Return true if the target doesn't mind addresses in vectors.
  bool prefersVectorizedAddressing() const;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale,
                           unsigned AddrSpace = 0) const;

  /// \brief Return true if the target supports a load/store instruction with
  /// the given Offset in the form reg + Offset. It may be that Offset is too
  /// big for a certain type (register class).
  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const;

  /// \brief Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// \brief Return true if it is profitable to hoist instruction in the
  /// then/else to before if.
  bool isProfitableToHoist(Instruction *I) const;

  /// \brief Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// \brief Returns the target's jmp_buf alignment in bytes.
  unsigned getJumpBufAlignment() const;

  /// \brief Returns the target's jmp_buf size in bytes.
  unsigned getJumpBufSize() const;

  /// \brief Return true if switches should be turned into lookup tables for
  /// the target.
  bool shouldBuildLookupTables() const;

  /// \brief Return true if switches should be turned into lookup tables
  /// containing this constant value for the target.
  bool shouldBuildLookupTablesForConstant(Constant *C) const;

  /// \brief Estimate the overhead of scalarizing an instruction. Insert and
  /// Extract are set if the result needs to be inserted and/or extracted from
  /// vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  /// \brief Estimate the overhead of scalarizing an instruction's unique
  /// non-constant operands. The types of the arguments are ordinarily scalar,
  /// in which case the costs are multiplied with VF.
  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) const;
498 /// \brief Don't restrict interleaved unrolling to small loops.
499 bool enableAggressiveInterleaving(bool LoopHasReductions) const;
501 /// \brief Enable inline expansion of memcmp
502 bool expandMemCmp(Instruction *I, unsigned &MaxLoadSize) const;
504 /// \brief Enable matching of interleaved access groups.
505 bool enableInterleavedAccessVectorization() const;
507 /// \brief Indicate that it is potentially unsafe to automatically vectorize
508 /// floating-point operations because the semantics of vector and scalar
509 /// floating-point semantics may differ. For example, ARM NEON v7 SIMD math
510 /// does not support IEEE-754 denormal numbers, while depending on the
511 /// platform, scalar floating-point math does.
512 /// This applies to floating-point math operations and calls, not memory
513 /// operations, shuffles, or casts.
514 bool isFPVectorizationPotentiallyUnsafe() const;

  /// \brief Determine if the target supports unaligned memory accesses.
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace = 0,
                                      unsigned Alignment = 1,
                                      bool *Fast = nullptr) const;

  /// \brief Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// \brief Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// \brief Return the expected cost of supporting the floating point operation
  /// of the specified type.
  int getFPOpCost(Type *Ty) const;

  /// \brief Return the expected cost of materializing the given integer
  /// immediate of the specified type.
  int getIntImmCost(const APInt &Imm, Type *Ty) const;

  /// \brief Return the expected cost of materializing the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                    Type *Ty) const;
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty) const;

  /// \brief Return the expected cost for the given integer when optimising
  /// for size. This is different than the other integer immediate cost
  /// functions in that it is subtarget agnostic. This is useful when you e.g.
  /// target one ISA such as Aarch32 but smaller encodings could be possible
  /// with another such as Thumb. This return value is used as a penalty when
  /// the total cost for a constant is calculated (the bigger the cost, the
  /// more beneficial constant hoisting is).
  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) const;

  /// @}

  /// \name Vector Target Information
  /// @{

  /// \brief The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,       ///< Broadcast element 0 to all other elements.
    SK_Reverse,         ///< Reverse the order of the vector.
    SK_Alternate,       ///< Choose alternate elements from vector.
    SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector,///< ExtractSubvector. Index indicates start offset.
    SK_PermuteTwoSrc,   ///< Merge elements from two source vectors into one
                        ///< with any shuffle mask.
    SK_PermuteSingleSrc ///< Shuffle elements of single source vector with any
                        ///< shuffle mask.
  };

  /// \brief Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is uniform constant.
    OK_NonUniformConstantValue // Operand is a non-uniform constant value.
  };

  /// \brief Additional properties of an operand's values.
  enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };

  /// \return The number of scalar or vector registers that the target has.
  /// If 'Vectors' is true, it returns the number of vector registers. If it is
  /// set to false, it returns the number of scalar registers.
  unsigned getNumberOfRegisters(bool Vector) const;

  /// \return The width of the largest scalar or vector register type.
  unsigned getRegisterBitWidth(bool Vector) const;

  /// \return The width of the smallest vector register type.
  unsigned getMinVectorRegisterBitWidth() const;

  /// \return True if it should be considered for address type promotion.
  /// \p AllowPromotionWithoutCommonHeader Set true if promoting \p I is
  /// profitable without finding other extensions fed by the same input.
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const;

  /// \return The size of a cache line in bytes.
  unsigned getCacheLineSize() const;

  /// \return How far ahead of a load we should place the prefetch instruction.
  /// This is currently measured in number of instructions.
  unsigned getPrefetchDistance() const;

  /// \return Some HW prefetchers can handle accesses up to a certain constant
  /// stride. This is the minimum stride in bytes where it makes sense to start
  /// adding SW prefetches. The default is 1, i.e. prefetch with any stride.
  unsigned getMinPrefetchStride() const;

  /// \return The maximum number of iterations to prefetch ahead. If the
  /// required number of iterations is more than this number, no prefetching is
  /// performed.
  unsigned getMaxPrefetchIterationsAhead() const;

  /// \return The maximum interleave factor that any transform should try to
  /// perform for this target. This number depends on the level of parallelism
  /// and the number of execution units in the CPU.
  unsigned getMaxInterleaveFactor(unsigned VF) const;

  /// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
  /// \p Args is an optional argument which holds the instruction operands
  /// values so the TTI can analyze those values searching for special
  /// cases/optimizations based on those values.
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
      OperandValueKind Opd2Info = OK_AnyValue,
      OperandValueProperties Opd1PropInfo = OP_None,
      OperandValueProperties Opd2PropInfo = OP_None,
      ArrayRef<const Value *> Args = ArrayRef<const Value *>()) const;

  /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
  /// The index and subtype parameters are used by the subvector insertion and
  /// extraction shuffle kinds.
  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
                     Type *SubTp = nullptr) const;

  /// \return The expected cost of cast instructions, such as bitcast, trunc,
  /// zext, etc. If there is an existing instruction that holds Opcode, it
  /// may be passed in the 'I' parameter.
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr) const;

  /// \return The expected cost of a sign- or zero-extended vector extract. Use
  /// -1 to indicate that there is no information about the index value.
  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                               unsigned Index = -1) const;

  /// \return The expected cost of control-flow related instructions such as
  /// branches.
  int getCFInstrCost(unsigned Opcode) const;

  /// \returns The expected cost of compare and select instructions. If there
  /// is an existing instruction that holds Opcode, it may be passed in the
  /// 'I' parameter.
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                         Type *CondTy = nullptr,
                         const Instruction *I = nullptr) const;

  /// \return The expected cost of vector Insert and Extract.
  /// Use -1 to indicate that there is no information on the index value.
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index = -1) const;

  /// \return The cost of Load and Store instructions.
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace,
                      const Instruction *I = nullptr) const;

  /// \return The cost of masked Load and Store instructions.
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace) const;

  /// \return The cost of Gather or Scatter operations.
  /// \p Opcode - the kind of memory access, Load or Store
  /// \p DataTy - a vector type of the data to be loaded or stored
  /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
  /// \p VariableMask - true when the memory access is predicated with a mask
  ///                   that is not a compile-time constant
  /// \p Alignment - alignment of single element
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                             bool VariableMask, unsigned Alignment) const;

  /// \return The cost of the interleaved memory operation.
  /// \p Opcode is the memory operation code
  /// \p VecTy is the vector type of the interleaved access.
  /// \p Factor is the interleave factor
  /// \p Indices is the indices for interleaved load members (as an interleaved
  ///    load allows gaps)
  /// \p Alignment is the alignment of the memory operation
  /// \p AddressSpace is the address space of the pointer.
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace) const;

  /// \brief Calculate the cost of performing a vector reduction.
  ///
  /// This is the cost of reducing the vector value of type \p Ty to a scalar
  /// value using the operation denoted by \p Opcode. The form of the reduction
  /// can either be a pairwise reduction or a reduction that splits the vector
  /// at every reduction level.
  ///
  /// Pairwise:
  ///  (v0, v1, v2, v3)
  ///  ((v0+v1), (v2+v3), undef, undef)
  /// Split:
  ///  (v0, v1, v2, v3)
  ///  ((v0+v2), (v1+v3), undef, undef)
  int getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm) const;

  /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
  /// Three cases are handled: 1. scalar instruction 2. vector instruction
  /// 3. scalar instruction which is to be vectorized with VF.
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args, FastMathFlags FMF,
                            unsigned VF = 1) const;

  /// \returns The cost of Intrinsic instructions. Types analysis only.
  /// If ScalarizationCostPassed is UINT_MAX, the cost of scalarizing the
  /// arguments and the return value will be computed based on types.
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Type *> Tys, FastMathFlags FMF,
                            unsigned ScalarizationCostPassed = UINT_MAX) const;

  /// \returns The cost of Call instructions.
  int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) const;

  /// \returns The number of pieces into which the provided type must be
  /// split during legalization. Zero is returned when the answer is unknown.
  unsigned getNumberOfParts(Type *Tp) const;

  /// \returns The cost of the address computation. For most targets this can be
  /// merged into the instruction indexing mode. Some targets might want to
  /// distinguish between address computation for memory operations on vector
  /// types and scalar types. Such targets should override this function.
  /// The 'SE' parameter holds a pointer to the scalar evolution object which
  /// is used in order to get the Ptr step value in case of constant stride.
  /// The 'Ptr' parameter holds the SCEV of the access pointer.
  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE = nullptr,
                                const SCEV *Ptr = nullptr) const;

  /// \returns The cost, if any, of keeping values of the given types alive
  /// over a call.
  ///
  /// Some types may require the use of register classes that do not have
  /// any callee-saved registers, so would require a spill and fill.
  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;

  /// \returns True if the intrinsic is a supported memory intrinsic. Info
  /// will contain additional information - whether the intrinsic may write
  /// or read to memory, volatility and the pointer. Info is undefined
  /// if false is returned.
  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  /// \returns The maximum element size, in bytes, for an element
  /// unordered-atomic memory intrinsic.
  unsigned getAtomicMemIntrinsicMaxElementSize() const;

  /// \returns A value which is the result of the given memory intrinsic. New
  /// instructions may be created to extract the result from the given intrinsic
  /// memory operation. Returns nullptr if the target cannot create a result
  /// from the given intrinsic.
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) const;

  /// \returns The type to use in a loop expansion of a memcpy call.
  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign, unsigned DestAlign) const;

  /// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
  /// \param RemainingBytes The number of bytes to copy.
  ///
  /// Calculates the operand types to use when copying \p RemainingBytes of
  /// memory, where source and destination alignments are \p SrcAlign and
  /// \p DestAlign respectively.
  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const;

  /// \returns True if we want to test the new memcpy lowering functionality in
  /// Transform/Utils.
  /// Temporary. Will be removed once we move to the new functionality and
  /// remove the old.
  bool useWideIRMemcpyLoopLowering() const;

  /// \returns True if the two functions have compatible attributes for inlining
  /// purposes.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// \returns The bitwidth of the largest vector type that should be used to
  /// load/store in the given address space.
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  /// \returns True if the load instruction is legal to vectorize.
  bool isLegalToVectorizeLoad(LoadInst *LI) const;

  /// \returns True if the store instruction is legal to vectorize.
  bool isLegalToVectorizeStore(StoreInst *SI) const;

  /// \returns True if it is legal to vectorize the given load chain.
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const;

  /// \returns True if it is legal to vectorize the given store chain.
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const;

  /// \returns The new vector factor value if the target doesn't support \p
  /// SizeInBytes loads or has a better vector factor.
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const;

  /// \returns The new vector factor value if the target doesn't support \p
  /// SizeInBytes stores or has a better vector factor.
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;

  /// Flags describing the kind of vector reduction.
  struct ReductionFlags {
    ReductionFlags() : IsMaxOp(false), IsSigned(false), NoNaN(false) {}
    bool IsMaxOp;  ///< If the op is a min/max kind, true if it's a max operation.
    bool IsSigned; ///< Whether the operation is a signed int reduction.
    bool NoNaN;    ///< If op is an fp min/max, whether NaNs may be present.
  };

  /// \returns True if the target wants to handle the given reduction idiom in
  /// the intrinsics form instead of the shuffle form.
  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const;

  /// \returns True if the target wants to expand the given reduction intrinsic
  /// into a shuffle sequence.
  bool shouldExpandReduction(const IntrinsicInst *II) const;

  /// @}

private:
  /// \brief The abstract base class used to type erase specific TTI
  /// implementations.
  class Concept;

  /// \brief The template model for the base class which wraps a concrete
  /// implementation in a type erased interface.
  template <typename T> class Model;

  std::unique_ptr<Concept> TTIImpl;
};

class TargetTransformInfo::Concept {
public:
  virtual ~Concept() = 0;
  virtual const DataLayout &getDataLayout() const = 0;
  virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
  virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
                         ArrayRef<const Value *> Operands) = 0;
  virtual int getExtCost(const Instruction *I, const Value *Src) = 0;
  virtual int getCallCost(FunctionType *FTy, int NumArgs) = 0;
  virtual int getCallCost(const Function *F, int NumArgs) = 0;
  virtual int getCallCost(const Function *F,
                          ArrayRef<const Value *> Arguments) = 0;
  virtual unsigned getInliningThresholdMultiplier() = 0;
  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                               ArrayRef<Type *> ParamTys) = 0;
  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                               ArrayRef<const Value *> Arguments) = 0;
  virtual unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                                    unsigned &JTSize) = 0;
  virtual int
  getUserCost(const User *U, ArrayRef<const Value *> Operands) = 0;
  virtual bool hasBranchDivergence() = 0;
  virtual bool isSourceOfDivergence(const Value *V) = 0;
  virtual bool isAlwaysUniform(const Value *V) = 0;
  virtual unsigned getFlatAddressSpace() = 0;
  virtual bool isLoweredToCall(const Function *F) = 0;
  virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
                                       UnrollingPreferences &UP) = 0;
  virtual bool isLegalAddImmediate(int64_t Imm) = 0;
  virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale,
                                     unsigned AddrSpace) = 0;
  virtual bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                             TargetTransformInfo::LSRCost &C2) = 0;
  virtual bool isLegalMaskedStore(Type *DataType) = 0;
  virtual bool isLegalMaskedLoad(Type *DataType) = 0;
  virtual bool isLegalMaskedScatter(Type *DataType) = 0;
  virtual bool isLegalMaskedGather(Type *DataType) = 0;
  virtual bool prefersVectorizedAddressing() = 0;
  virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale, unsigned AddrSpace) = 0;
  virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) = 0;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
  virtual bool isProfitableToHoist(Instruction *I) = 0;
  virtual bool isTypeLegal(Type *Ty) = 0;
  virtual unsigned getJumpBufAlignment() = 0;
  virtual unsigned getJumpBufSize() = 0;
  virtual bool shouldBuildLookupTables() = 0;
  virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
  virtual unsigned
  getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) = 0;
  virtual unsigned
  getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                   unsigned VF) = 0;
  virtual bool supportsEfficientVectorElementLoadStore() = 0;
  virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
  virtual bool expandMemCmp(Instruction *I, unsigned &MaxLoadSize) = 0;
  virtual bool enableInterleavedAccessVectorization() = 0;
  virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
  virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                              unsigned BitWidth,
                                              unsigned AddressSpace,
                                              unsigned Alignment,
                                              bool *Fast) = 0;
  virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
  virtual bool haveFastSqrt(Type *Ty) = 0;
  virtual int getFPOpCost(Type *Ty) = 0;
  virtual int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty) = 0;
  virtual int getIntImmCost(const APInt &Imm, Type *Ty) = 0;
  virtual int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) = 0;
  virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                            Type *Ty) = 0;
  virtual unsigned getNumberOfRegisters(bool Vector) = 0;
  virtual unsigned getRegisterBitWidth(bool Vector) const = 0;
  virtual unsigned getMinVectorRegisterBitWidth() = 0;
  virtual bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
  virtual unsigned getCacheLineSize() = 0;
  virtual unsigned getPrefetchDistance() = 0;
  virtual unsigned getMinPrefetchStride() = 0;
  virtual unsigned getMaxPrefetchIterationsAhead() = 0;
  virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
  virtual int
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
                         OperandValueKind Opd2Info,
                         OperandValueProperties Opd1PropInfo,
                         OperandValueProperties Opd2PropInfo,
                         ArrayRef<const Value *> Args) = 0;
  virtual int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                             Type *SubTp) = 0;
  virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                               const Instruction *I) = 0;
  virtual int getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                       VectorType *VecTy, unsigned Index) = 0;
  virtual int getCFInstrCost(unsigned Opcode) = 0;
  virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                 Type *CondTy, const Instruction *I) = 0;
  virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
                                 unsigned Index) = 0;
  virtual int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                              unsigned AddressSpace, const Instruction *I) = 0;
  virtual int getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment,
                                    unsigned AddressSpace) = 0;
  virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                     Value *Ptr, bool VariableMask,
                                     unsigned Alignment) = 0;
  virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                         unsigned Factor,
                                         ArrayRef<unsigned> Indices,
                                         unsigned Alignment,
                                         unsigned AddressSpace) = 0;
  virtual int getReductionCost(unsigned Opcode, Type *Ty,
                               bool IsPairwiseForm) = 0;
  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                    ArrayRef<Type *> Tys, FastMathFlags FMF,
                                    unsigned ScalarizationCostPassed) = 0;
  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                    ArrayRef<Value *> Args, FastMathFlags FMF,
                                    unsigned VF) = 0;
  virtual int getCallInstrCost(Function *F, Type *RetTy,
                               ArrayRef<Type *> Tys) = 0;
  virtual unsigned getNumberOfParts(Type *Tp) = 0;
  virtual int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                        const SCEV *Ptr) = 0;
  virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                                  MemIntrinsicInfo &Info) = 0;
  virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                   Type *ExpectedType) = 0;
  virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                          unsigned SrcAlign,
                                          unsigned DestAlign) const = 0;
  virtual void getMemcpyLoopResidualLoweringType(
      SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
      unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const = 0;
  virtual bool areInlineCompatible(const Function *Caller,
                                   const Function *Callee) const = 0;
  virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
  virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
  virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                           unsigned Alignment,
                                           unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                            unsigned Alignment,
                                            unsigned AddrSpace) const = 0;
  virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                       unsigned ChainSizeInBytes,
                                       VectorType *VecTy) const = 0;
  virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                        unsigned ChainSizeInBytes,
                                        VectorType *VecTy) const = 0;
  virtual bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                                     ReductionFlags) const = 0;
  virtual bool shouldExpandReduction(const IntrinsicInst *II) const = 0;
};

template <typename T>
class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
  T Impl;

public:
  Model(T Impl) : Impl(std::move(Impl)) {}
  ~Model() override {}

  const DataLayout &getDataLayout() const override {
    return Impl.getDataLayout();
  }

  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
    return Impl.getOperationCost(Opcode, Ty, OpTy);
  }
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) override {
    return Impl.getGEPCost(PointeeType, Ptr, Operands);
  }
  int getExtCost(const Instruction *I, const Value *Src) override {
    return Impl.getExtCost(I, Src);
  }
  int getCallCost(FunctionType *FTy, int NumArgs) override {
    return Impl.getCallCost(FTy, NumArgs);
  }
  int getCallCost(const Function *F, int NumArgs) override {
    return Impl.getCallCost(F, NumArgs);
  }
  int getCallCost(const Function *F,
                  ArrayRef<const Value *> Arguments) override {
    return Impl.getCallCost(F, Arguments);
  }
  unsigned getInliningThresholdMultiplier() override {
    return Impl.getInliningThresholdMultiplier();
  }
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<Type *> ParamTys) override {
    return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
  }
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<const Value *> Arguments) override {
    return Impl.getIntrinsicCost(IID, RetTy, Arguments);
  }
  int getUserCost(const User *U, ArrayRef<const Value *> Operands) override {
    return Impl.getUserCost(U, Operands);
  }
  bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
  bool isSourceOfDivergence(const Value *V) override {
    return Impl.isSourceOfDivergence(V);
  }
  bool isAlwaysUniform(const Value *V) override {
    return Impl.isAlwaysUniform(V);
  }
  unsigned getFlatAddressSpace() override {
    return Impl.getFlatAddressSpace();
  }
  bool isLoweredToCall(const Function *F) override {
    return Impl.isLoweredToCall(F);
  }
  void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                               UnrollingPreferences &UP) override {
    return Impl.getUnrollingPreferences(L, SE, UP);
  }
  bool isLegalAddImmediate(int64_t Imm) override {
    return Impl.isLegalAddImmediate(Imm);
  }
  bool isLegalICmpImmediate(int64_t Imm) override {
    return Impl.isLegalICmpImmediate(Imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) override {
    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                      Scale, AddrSpace);
  }
  bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                     TargetTransformInfo::LSRCost &C2) override {
    return Impl.isLSRCostLess(C1, C2);
  }
  bool isLegalMaskedStore(Type *DataType) override {
    return Impl.isLegalMaskedStore(DataType);
  }
  bool isLegalMaskedLoad(Type *DataType) override {
    return Impl.isLegalMaskedLoad(DataType);
  }
  bool isLegalMaskedScatter(Type *DataType) override {
    return Impl.isLegalMaskedScatter(DataType);
  }
  bool isLegalMaskedGather(Type *DataType) override {
    return Impl.isLegalMaskedGather(DataType);
  }
  bool prefersVectorizedAddressing() override {
    return Impl.prefersVectorizedAddressing();
  }
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale,
                           unsigned AddrSpace) override {
    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
                                     Scale, AddrSpace);
  }
  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) override {
    return Impl.isFoldableMemAccessOffset(I, Offset);
  }
  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
    return Impl.isTruncateFree(Ty1, Ty2);
  }
  bool isProfitableToHoist(Instruction *I) override {
    return Impl.isProfitableToHoist(I);
  }
  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
  unsigned getJumpBufAlignment() override { return Impl.getJumpBufAlignment(); }
  unsigned getJumpBufSize() override { return Impl.getJumpBufSize(); }
  bool shouldBuildLookupTables() override {
    return Impl.shouldBuildLookupTables();
  }
  bool shouldBuildLookupTablesForConstant(Constant *C) override {
    return Impl.shouldBuildLookupTablesForConstant(C);
  }
  unsigned getScalarizationOverhead(Type *Ty, bool Insert,
                                    bool Extract) override {
    return Impl.getScalarizationOverhead(Ty, Insert, Extract);
  }
  unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                            unsigned VF) override {
    return Impl.getOperandsScalarizationOverhead(Args, VF);
  }
  bool supportsEfficientVectorElementLoadStore() override {
    return Impl.supportsEfficientVectorElementLoadStore();
  }
  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
    return Impl.enableAggressiveInterleaving(LoopHasReductions);
  }
  bool expandMemCmp(Instruction *I, unsigned &MaxLoadSize) override {
    return Impl.expandMemCmp(I, MaxLoadSize);
  }
  bool enableInterleavedAccessVectorization() override {
    return Impl.enableInterleavedAccessVectorization();
  }
  bool isFPVectorizationPotentiallyUnsafe() override {
    return Impl.isFPVectorizationPotentiallyUnsafe();
  }
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth, unsigned AddressSpace,
                                      unsigned Alignment, bool *Fast) override {
    return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                               Alignment, Fast);
  }
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
    return Impl.getPopcntSupport(IntTyWidthInBit);
  }
  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }

  int getFPOpCost(Type *Ty) override { return Impl.getFPOpCost(Ty); }

  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) override {
    return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
  }
  int getIntImmCost(const APInt &Imm, Type *Ty) override {
    return Impl.getIntImmCost(Imm, Ty);
  }
  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                    Type *Ty) override {
    return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
  }
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty) override {
    return Impl.getIntImmCost(IID, Idx, Imm, Ty);
  }
  unsigned getNumberOfRegisters(bool Vector) override {
    return Impl.getNumberOfRegisters(Vector);
  }
  unsigned getRegisterBitWidth(bool Vector) const override {
    return Impl.getRegisterBitWidth(Vector);
  }
  unsigned getMinVectorRegisterBitWidth() override {
    return Impl.getMinVectorRegisterBitWidth();
  }
  bool shouldConsiderAddressTypePromotion(
      const Instruction &I, bool &AllowPromotionWithoutCommonHeader) override {
    return Impl.shouldConsiderAddressTypePromotion(
        I, AllowPromotionWithoutCommonHeader);
  }
  unsigned getCacheLineSize() override {
    return Impl.getCacheLineSize();
  }
  unsigned getPrefetchDistance() override { return Impl.getPrefetchDistance(); }
  unsigned getMinPrefetchStride() override {
    return Impl.getMinPrefetchStride();
  }
  unsigned getMaxPrefetchIterationsAhead() override {
    return Impl.getMaxPrefetchIterationsAhead();
  }
  unsigned getMaxInterleaveFactor(unsigned VF) override {
    return Impl.getMaxInterleaveFactor(VF);
  }
  unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
                                            unsigned &JTSize) override {
    return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize);
  }
  int
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
                         OperandValueKind Opd2Info,
                         OperandValueProperties Opd1PropInfo,
                         OperandValueProperties Opd2PropInfo,
                         ArrayRef<const Value *> Args) override {
    return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo, Args);
  }
  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                     Type *SubTp) override {
    return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
  }
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I) override {
    return Impl.getCastInstrCost(Opcode, Dst, Src, I);
  }
  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                               unsigned Index) override {
    return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  }
  int getCFInstrCost(unsigned Opcode) override {
    return Impl.getCFInstrCost(Opcode);
  }
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                         const Instruction *I) override {
    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
  }
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
    return Impl.getVectorInstrCost(Opcode, Val, Index);
  }
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace, const Instruction *I) override {
    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, I);
  }
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace) override {
    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  }
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                             Value *Ptr, bool VariableMask,
                             unsigned Alignment) override {
    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
  }
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace) override {
    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
  }
  int getReductionCost(unsigned Opcode, Type *Ty,
                       bool IsPairwiseForm) override {
    return Impl.getReductionCost(Opcode, Ty, IsPairwiseForm);
  }
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, ArrayRef<Type *> Tys,
                            FastMathFlags FMF,
                            unsigned ScalarizationCostPassed) override {
    return Impl.getIntrinsicInstrCost(ID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
  }
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args, FastMathFlags FMF,
                            unsigned VF) override {
    return Impl.getIntrinsicInstrCost(ID, RetTy, Args, FMF, VF);
  }
  int getCallInstrCost(Function *F, Type *RetTy,
                       ArrayRef<Type *> Tys) override {
    return Impl.getCallInstrCost(F, RetTy, Tys);
  }
  unsigned getNumberOfParts(Type *Tp) override {
    return Impl.getNumberOfParts(Tp);
  }
  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                const SCEV *Ptr) override {
    return Impl.getAddressComputationCost(Ty, SE, Ptr);
  }
  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
    return Impl.getCostOfKeepingLiveOverCall(Tys);
  }
  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) override {
    return Impl.getTgtMemIntrinsic(Inst, Info);
  }
  unsigned getAtomicMemIntrinsicMaxElementSize() const override {
    return Impl.getAtomicMemIntrinsicMaxElementSize();
  }
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) override {
    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
  }
  Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
                                  unsigned SrcAlign,
                                  unsigned DestAlign) const override {
    return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAlign, DestAlign);
  }
  void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
                                         LLVMContext &Context,
                                         unsigned RemainingBytes,
                                         unsigned SrcAlign,
                                         unsigned DestAlign) const override {
    Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
                                           SrcAlign, DestAlign);
  }
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return Impl.areInlineCompatible(Caller, Callee);
  }
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
  }
  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
    return Impl.isLegalToVectorizeLoad(LI);
  }
  bool isLegalToVectorizeStore(StoreInst *SI) const override {
    return Impl.isLegalToVectorizeStore(SI);
  }
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                            AddrSpace);
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                             AddrSpace);
  }
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override {
    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
  }
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  }
  bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
                             ReductionFlags Flags) const override {
    return Impl.useReductionIntrinsic(Opcode, Ty, Flags);
  }
  bool shouldExpandReduction(const IntrinsicInst *II) const override {
    return Impl.shouldExpandReduction(II);
  }
};

template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}
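
// Example: a target typically constructs its TTI result by wrapping a
// target-specific implementation type in the constructor above. A minimal
// sketch; MyTTIImpl is hypothetical, standing in for a type that implements
// the \c Concept API (e.g. one derived from BasicTTIImplBase):
//
//   TargetIRAnalysis MyTargetMachine::getTargetIRAnalysis() {
//     return TargetIRAnalysis([this](const Function &F) {
//       return TargetTransformInfo(MyTTIImpl(this, F));
//     });
//   }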

/// \brief Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
  typedef TargetTransformInfo Result;

  /// \brief Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// \brief Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

  Result run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetIRAnalysis>;
  static AnalysisKey Key;

  /// \brief The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// \brief Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};
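
// Example: using TargetIRAnalysis under the new pass manager. An illustrative
// sketch; in a real pass the FunctionAnalysisManager is supplied by the pass
// framework rather than constructed by hand:
//
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return TargetIRAnalysis(); });
//   // ... and inside a pass's run method:
//   //   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);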

/// \brief Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  Optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// \brief We must provide a default constructor for the pass but it should
  /// never be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};

/// \brief Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

} // End llvm namespace

#endif