//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// This pass exposes codegen information to IR-level passes. Every
/// transformation that uses codegen information is broken into three parts:
/// 1. The IR-level analysis pass.
/// 2. The IR-level transformation interface, which provides the needed
///    information.
/// 3. Codegen-level implementation which uses target-specific hooks.
///
/// This file defines #2, which is the interface that IR-level transformations
/// use for querying the codegen.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H

#include "llvm/ADT/Optional.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"
#include <functional>

namespace llvm {

class Function;
class GlobalValue;
class Loop;
class ScalarEvolution;
class SCEV;
class Type;
class User;
class Value;

/// \brief Information about a load/store intrinsic defined by the target.
struct MemIntrinsicInfo {
  MemIntrinsicInfo()
      : ReadMem(false), WriteMem(false), IsSimple(false), MatchingId(0),
        NumMemRefs(0), PtrVal(nullptr) {}
  bool ReadMem;
  bool WriteMem;
  /// True only if this memory operation is non-volatile, non-atomic, and
  /// unordered. (See LoadInst/StoreInst for details on each.)
  bool IsSimple;
  // Same Id is set by the target for corresponding load/store intrinsics.
  unsigned short MatchingId;
  int NumMemRefs;
  Value *PtrVal;
};

/// \brief This pass provides access to the codegen interfaces that are needed
/// for IR-level transformations.
class TargetTransformInfo {
public:
  /// \brief Construct a TTI object using a type implementing the \c Concept
  /// API below.
  ///
  /// This is used by targets to construct a TTI wrapping their target-specific
  /// implementation that encodes appropriate costs for their target.
  template <typename T> TargetTransformInfo(T Impl);

  /// \brief Construct a baseline TTI object using a minimal implementation of
  /// the \c Concept API below.
  ///
  /// The TTI implementation will reflect the information in the DataLayout
  /// provided if non-null.
  explicit TargetTransformInfo(const DataLayout &DL);

  // Provide move semantics.
  TargetTransformInfo(TargetTransformInfo &&Arg);
  TargetTransformInfo &operator=(TargetTransformInfo &&RHS);

  // We need to define the destructor out-of-line to define our sub-classes
  // out of line.
  ~TargetTransformInfo();

  /// \brief Handle the invalidation of this information.
  ///
  /// When used as a result of \c TargetIRAnalysis this method will be called
  /// when the function this was computed for changes. When it returns false,
  /// the information is preserved across those changes.
  bool invalidate(Function &, const PreservedAnalyses &,
                  FunctionAnalysisManager::Invalidator &) {
    // FIXME: We should probably in some way ensure that the subtarget
    // information for a function hasn't changed.
    return false;
  }

  /// \name Generic Target Information
  /// @{

  /// \brief Underlying constants for 'cost' values in this interface.
  ///
  /// Many APIs in this interface return a cost. This enum defines the
  /// fundamental values that should be used to interpret (and produce) those
  /// costs. The costs are returned as an int rather than a member of this
  /// enumeration because it is expected that the cost of one IR instruction
  /// may have a multiplicative factor to it or otherwise won't fit directly
  /// into the enum. Moreover, it is common to sum or average costs which works
  /// better as simple integral values. Thus this enum only provides constants.
  /// Also note that the returned costs are signed integers to make it natural
  /// to add, subtract, and test with zero (a common boundary condition). It is
  /// not expected that 2^32 is a realistic cost to be modeling at any point.
  ///
  /// Note that these costs should usually reflect the intersection of
  /// code-size cost and execution cost. A free instruction is typically one
  /// that folds into another instruction. For example, reg-to-reg moves can
  /// often be skipped by renaming the registers in the CPU, but they still are
  /// encoded and thus wouldn't be considered 'free' here.
  enum TargetCostConstants {
    TCC_Free = 0,     ///< Expected to fold away in lowering.
    TCC_Basic = 1,    ///< The cost of a typical 'add' instruction.
    TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
  };

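  // A minimal usage sketch (illustrative, not part of the interface): because
  // costs are plain signed ints, clients typically sum them and compare the
  // total against multiples of these constants. `TTI`, `Insts`, and the
  // threshold of 4 below are hypothetical.
  //
  //   int Cost = 0;
  //   for (const Instruction &I : Insts)
  //     Cost += TTI.getUserCost(&I);
  //   bool IsCheap = Cost <= 4 * TargetTransformInfo::TCC_Basic;
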
  /// \brief Estimate the cost of a specific operation when lowered.
  ///
  /// Note that this is designed to work on an arbitrary synthetic opcode, and
  /// thus work for hypothetical queries before an instruction has even been
  /// formed. However, this does *not* work for GEPs, and must not be called
  /// for a GEP instruction. Instead, use the dedicated getGEPCost interface as
  /// analyzing a GEP's cost requires more information.
  ///
  /// Typically only the result type is required, and the operand type can be
  /// omitted. However, if the opcode is one of the cast instructions, the
  /// operand type is required.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;

  /// \brief Estimate the cost of a GEP operation when lowered.
  ///
  /// The contract for this function is the same as \c getOperationCost except
  /// that it supports an interface that provides extra information specific to
  /// the GEP operation.
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) const;

  /// \brief Estimate the cost of a function call when lowered.
  ///
  /// The contract for this is the same as \c getOperationCost except that it
  /// supports an interface that provides extra information specific to call
  /// sites.
  ///
  /// This is the most basic query for estimating call cost: it only knows the
  /// function type and (potentially) the number of arguments at the call site.
  /// The latter is only interesting for varargs function types.
  int getCallCost(FunctionType *FTy, int NumArgs = -1) const;

  /// \brief Estimate the cost of calling a specific function when lowered.
  ///
  /// This overload adds the ability to reason about the particular function
  /// being called in the event it is a library call with special lowering.
  int getCallCost(const Function *F, int NumArgs = -1) const;

  /// \brief Estimate the cost of calling a specific function when lowered.
  ///
  /// This overload allows specifying a set of candidate argument values.
  int getCallCost(const Function *F, ArrayRef<const Value *> Arguments) const;

  /// \returns A value by which our inlining threshold should be multiplied.
  /// This is primarily used to bump up the inlining threshold wholesale on
  /// targets where calls are unusually expensive.
  ///
  /// TODO: This is a rather blunt instrument. Perhaps altering the costs of
  /// individual classes of instructions would be better.
  unsigned getInliningThresholdMultiplier() const;

  /// \brief Estimate the cost of an intrinsic when lowered.
  ///
  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<Type *> ParamTys) const;

  /// \brief Estimate the cost of an intrinsic when lowered.
  ///
  /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<const Value *> Arguments) const;

  /// \brief Estimate the cost of a given IR user when lowered.
  ///
  /// This can estimate the cost of either a ConstantExpr or Instruction when
  /// lowered. It has two primary advantages over the \c getOperationCost and
  /// \c getGEPCost above, and one significant disadvantage: it can only be
  /// used when the IR construct has already been formed.
  ///
  /// The advantage is that it can inspect the SSA use graph to reason more
  /// accurately about the cost. For example, all-constant GEPs can often be
  /// folded into a load or other instruction, but if they are used in some
  /// other context they may not be folded. This routine can distinguish such
  /// cases.
  ///
  /// The returned cost is defined in terms of \c TargetCostConstants, see its
  /// comments for a detailed explanation of the cost values.
  int getUserCost(const User *U) const;

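  // Illustrative sketch: a client checking whether an existing value is
  // expected to be free after lowering. `TTI` and `GEP` are hypothetical
  // names for a TargetTransformInfo object and a GetElementPtrInst pointer.
  //
  //   if (TTI.getUserCost(GEP) == TargetTransformInfo::TCC_Free) {
  //     // Likely folded into a user's addressing mode; ignore it when
  //     // estimating the cost of the surrounding code.
  //   }
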
  /// \brief Return true if branch divergence exists.
  ///
  /// Branch divergence has a significantly negative impact on GPU performance
  /// when threads in the same wavefront take different paths due to
  /// conditional branches.
  bool hasBranchDivergence() const;

  /// \brief Returns whether V is a source of divergence.
  ///
  /// This function provides the target-dependent information for
  /// the target-independent DivergenceAnalysis. DivergenceAnalysis first
  /// builds the dependency graph, and then runs the reachability algorithm
  /// starting with the sources of divergence.
  bool isSourceOfDivergence(const Value *V) const;

  /// \brief Test whether calls to a function lower to actual program function
  /// calls.
  ///
  /// The idea is to test whether the program is likely to require a 'call'
  /// instruction or equivalent in order to call the given function.
  ///
  /// FIXME: It's not clear that this is a good or useful query API. Clients
  /// should probably move to simpler cost metrics using the above.
  /// Alternatively, we could split the cost interface into distinct code-size
  /// and execution-speed costs. This would allow modelling the core of this
  /// query more accurately, as a call is a single small instruction but
  /// incurs significant execution cost.
  bool isLoweredToCall(const Function *F) const;

  /// Parameters that control the generic loop unrolling transformation.
  struct UnrollingPreferences {
    /// The cost threshold for the unrolled loop. Should be relative to the
    /// getUserCost values returned by this API, and the expectation is that
    /// the unrolled loop's instructions when run through that interface should
    /// not exceed this cost. However, this is only an estimate. Also, specific
    /// loops may be unrolled even with a cost above this threshold if deemed
    /// profitable. Set this to UINT_MAX to disable the loop body cost
    /// restriction.
    unsigned Threshold;
    /// If complete unrolling will reduce the cost of the loop, we will boost
    /// the Threshold by a certain percent to allow more aggressive complete
    /// unrolling. This value provides the maximum boost percentage that we
    /// can apply to Threshold (the value should be no less than 100).
    /// BoostedThreshold = Threshold * min(RolledCost / UnrolledCost,
    ///                                    MaxPercentThresholdBoost / 100)
    /// E.g. if complete unrolling reduces the loop execution time by 50%
    /// then we boost the threshold by the factor of 2x. If unrolling is not
    /// expected to reduce the running time, then we do not increase the
    /// threshold.
    unsigned MaxPercentThresholdBoost;
    /// The cost threshold for the unrolled loop when optimizing for size (set
    /// to UINT_MAX to disable).
    unsigned OptSizeThreshold;
    /// The cost threshold for the unrolled loop, like Threshold, but used
    /// for partial/runtime unrolling (set to UINT_MAX to disable).
    unsigned PartialThreshold;
    /// The cost threshold for the unrolled loop when optimizing for size, like
    /// OptSizeThreshold, but used for partial/runtime unrolling (set to
    /// UINT_MAX to disable).
    unsigned PartialOptSizeThreshold;
    /// A forced unrolling factor (the number of concatenated bodies of the
    /// original loop in the unrolled loop body). When set to 0, the unrolling
    /// transformation will select an unrolling factor based on the current
    /// cost threshold and other factors.
    unsigned Count;
    /// A forced peeling factor (the number of bodies of the original loop
    /// that should be peeled off before the loop body). When set to 0, the
    /// unrolling transformation will select a peeling factor based on profile
    /// information and other factors.
    unsigned PeelCount;
    /// Default unroll count for loops with run-time trip count.
    unsigned DefaultUnrollRuntimeCount;
    // Set the maximum unrolling factor. The unrolling factor may be selected
    // using the appropriate cost threshold, but may not exceed this number
    // (set to UINT_MAX to disable). This does not apply in cases where the
    // loop is being fully unrolled.
    unsigned MaxCount;
    /// Set the maximum unrolling factor for full unrolling. Like MaxCount, but
    /// applies even if full unrolling is selected. This allows a target to
    /// fall back to partial unrolling if full unrolling is above
    /// FullUnrollMaxCount.
    unsigned FullUnrollMaxCount;
    // Represents the number of instructions optimized away when a "back edge"
    // becomes a "fall through" in the unrolled loop.
    // For now we count a conditional branch on a backedge and a comparison
    // of the loop induction variable for that branch.
    unsigned BEInsns;
    /// Allow partial unrolling (unrolling of loops to expand the size of the
    /// loop body, not only to eliminate small constant-trip-count loops).
    bool Partial;
    /// Allow runtime unrolling (unrolling of loops to expand the size of the
    /// loop body even when the number of loop iterations is not known at
    /// compile time).
    bool Runtime;
    /// Allow generation of a loop remainder (extra iterations after unroll).
    bool AllowRemainder;
    /// Allow emitting expensive instructions (such as divisions) when
    /// computing the trip count of a loop for runtime unrolling.
    bool AllowExpensiveTripCount;
    /// Apply loop unroll on any kind of loop
    /// (mainly to loops that fail runtime unrolling).
    bool Force;
    /// Allow using trip count upper bound to unroll loops.
    bool UpperBound;
    /// Allow peeling off loop iterations for loops with low dynamic tripcount.
    bool AllowPeeling;
  };

  /// \brief Get target-customized preferences for the generic loop unrolling
  /// transformation. The caller will initialize UP with the current
  /// target-independent defaults.
  void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;

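  // Illustrative sketch of a target override (MyTTIImpl and all numbers are
  // hypothetical, not taken from any in-tree target):
  //
  //   void MyTTIImpl::getUnrollingPreferences(Loop *L,
  //                                           UnrollingPreferences &UP) {
  //     UP.Partial = true; // Allow partial unrolling.
  //     UP.Runtime = true; // Allow unrolling with a runtime trip count.
  //     UP.MaxCount = 4;   // But never unroll by more than a factor of 4.
  //   }
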
  /// @}

  /// \name Scalar Target Information
  /// @{

  /// \brief Flags indicating the kind of support for population count.
  ///
  /// Compared to the SW implementation, HW support is supposed to
  /// significantly boost the performance when the population is dense, and it
  /// may or may not degrade performance if the population is sparse. HW
  /// support is considered "Fast" if it can outperform, or is on a par with,
  /// the SW implementation when the population is sparse; otherwise, it is
  /// considered "Slow".
  enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };

  /// \brief Return true if the specified immediate is a legal add immediate,
  /// that is, the target has add instructions which can add a register with
  /// the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalAddImmediate(int64_t Imm) const;

  /// \brief Return true if the specified immediate is a legal icmp immediate,
  /// that is, the target has icmp instructions which can compare a register
  /// against the immediate without having to materialize the immediate into a
  /// register.
  bool isLegalICmpImmediate(int64_t Imm) const;

  /// \brief Return true if the addressing mode represented by AM is legal for
  /// this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace = 0) const;

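  // For reference, the queried addressing mode has the general form
  //
  //   BaseGV + BaseOffset + (HasBaseReg ? BaseReg : 0) + Scale * ScaleReg
  //
  // so, for example, a simple reg+reg mode is (BaseGV=nullptr, BaseOffset=0,
  // HasBaseReg=true, Scale=1), and an x86-style [base + 4*index] mode is
  // (HasBaseReg=true, Scale=4).
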
  /// \brief Return true if the target supports masked load/store.
  /// AVX2 and AVX-512 targets allow masks for consecutive load and store
  /// operations.
  bool isLegalMaskedStore(Type *DataType) const;
  bool isLegalMaskedLoad(Type *DataType) const;

  /// \brief Return true if the target supports masked gather/scatter.
  /// AVX-512 fully supports gather and scatter for vectors with 32- and
  /// 64-bit scalar types.
  bool isLegalMaskedScatter(Type *DataType) const;
  bool isLegalMaskedGather(Type *DataType) const;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale,
                           unsigned AddrSpace = 0) const;

  /// \brief Return true if the target supports a load/store instruction with
  /// the given Offset in the form reg + Offset. It may be that Offset is too
  /// big for a certain type (register class).
  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const;

  /// \brief Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to
  /// i16 by referencing its sub-register AX.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const;

  /// \brief Return true if it is profitable to hoist instructions in the
  /// then/else blocks to before the if.
  bool isProfitableToHoist(Instruction *I) const;

  /// \brief Return true if this type is legal.
  bool isTypeLegal(Type *Ty) const;

  /// \brief Returns the target's jmp_buf alignment in bytes.
  unsigned getJumpBufAlignment() const;

  /// \brief Returns the target's jmp_buf size in bytes.
  unsigned getJumpBufSize() const;

  /// \brief Return true if switches should be turned into lookup tables for
  /// the target.
  bool shouldBuildLookupTables() const;

  /// \brief Return true if switches should be turned into lookup tables
  /// containing this constant value for the target.
  bool shouldBuildLookupTablesForConstant(Constant *C) const;

  /// \brief Don't restrict interleaved unrolling to small loops.
  bool enableAggressiveInterleaving(bool LoopHasReductions) const;

  /// \brief Enable matching of interleaved access groups.
  bool enableInterleavedAccessVectorization() const;

  /// \brief Indicate that it is potentially unsafe to automatically vectorize
  /// floating-point operations because the semantics of vector and scalar
  /// floating-point operations may differ. For example, ARM NEON v7 SIMD math
  /// does not support IEEE-754 denormal numbers, while depending on the
  /// platform, scalar floating-point math does.
  /// This applies to floating-point math operations and calls, not memory
  /// operations, shuffles, or casts.
  bool isFPVectorizationPotentiallyUnsafe() const;

  /// \brief Determine if the target supports unaligned memory accesses.
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth,
                                      unsigned AddressSpace = 0,
                                      unsigned Alignment = 1,
                                      bool *Fast = nullptr) const;

  /// \brief Return hardware support for population count.
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;

  /// \brief Return true if the hardware has a fast square-root instruction.
  bool haveFastSqrt(Type *Ty) const;

  /// \brief Return the expected cost of supporting the floating point
  /// operation of the specified type.
  int getFPOpCost(Type *Ty) const;

  /// \brief Return the expected cost of materializing the given integer
  /// immediate of the specified type.
  int getIntImmCost(const APInt &Imm, Type *Ty) const;

  /// \brief Return the expected cost of materializing the given integer
  /// immediate of the specified type for a given instruction. The cost can be
  /// zero if the immediate can be folded into the specified instruction.
  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                    Type *Ty) const;
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty) const;

  /// \brief Return the expected cost for the given integer immediate when
  /// optimising for size. This differs from the other integer immediate cost
  /// functions in that it is subtarget agnostic. This is useful when you e.g.
  /// target one ISA such as AArch32 but smaller encodings could be possible
  /// with another such as Thumb. This return value is used as a penalty when
  /// the total cost for a constant is calculated (the bigger the cost, the
  /// more beneficial constant hoisting is).
  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) const;

  /// @}

  /// \name Vector Target Information
  /// @{

  /// \brief The various kinds of shuffle patterns for vector queries.
  enum ShuffleKind {
    SK_Broadcast,       ///< Broadcast element 0 to all other elements.
    SK_Reverse,         ///< Reverse the order of the vector.
    SK_Alternate,       ///< Choose alternate elements from vector.
    SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
    SK_ExtractSubvector,///< ExtractSubvector. Index indicates start offset.
    SK_PermuteTwoSrc,   ///< Merge elements from two source vectors into one
                        ///< with any shuffle mask.
    SK_PermuteSingleSrc ///< Shuffle elements of single source vector with any
                        ///< shuffle mask.
  };

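  // Illustrative LLVM IR: the kinds classify shufflevector masks. For a
  // 4-element vector, for instance:
  //
  //   %splat = shufflevector <4 x float> %v, <4 x float> undef,
  //            <4 x i32> zeroinitializer               ; SK_Broadcast
  //   %rev   = shufflevector <4 x float> %v, <4 x float> undef,
  //            <4 x i32> <i32 3, i32 2, i32 1, i32 0>  ; SK_Reverse
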
  /// \brief Additional information about an operand's possible values.
  enum OperandValueKind {
    OK_AnyValue,               // Operand can have any value.
    OK_UniformValue,           // Operand is uniform (splat of a value).
    OK_UniformConstantValue,   // Operand is a uniform constant.
    OK_NonUniformConstantValue // Operand is a non-uniform constant value.
  };

  /// \brief Additional properties of an operand's values.
  enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };

  /// \return The number of scalar or vector registers that the target has.
  /// If 'Vector' is true, it returns the number of vector registers. If it is
  /// set to false, it returns the number of scalar registers.
  unsigned getNumberOfRegisters(bool Vector) const;

  /// \return The width of the largest scalar or vector register type.
  unsigned getRegisterBitWidth(bool Vector) const;

  /// \return The size of a cache line in bytes.
  unsigned getCacheLineSize() const;

  /// \return How far ahead of a load we should place the prefetch
  /// instruction. This is currently measured in number of instructions.
  unsigned getPrefetchDistance() const;

  /// \return Some HW prefetchers can handle accesses up to a certain constant
  /// stride. This is the minimum stride in bytes where it makes sense to start
  /// adding SW prefetches. The default is 1, i.e. prefetch with any stride.
  unsigned getMinPrefetchStride() const;

  /// \return The maximum number of iterations to prefetch ahead. If the
  /// required number of iterations is more than this number, no prefetching is
  /// performed.
  unsigned getMaxPrefetchIterationsAhead() const;

  /// \return The maximum interleave factor that any transform should try to
  /// perform for this target. This number depends on the level of parallelism
  /// and the number of execution units in the CPU.
  unsigned getMaxInterleaveFactor(unsigned VF) const;

  /// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
  int getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
      OperandValueKind Opd2Info = OK_AnyValue,
      OperandValueProperties Opd1PropInfo = OP_None,
      OperandValueProperties Opd2PropInfo = OP_None) const;

  /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
  /// The index and subtype parameters are used by the subvector insertion and
  /// extraction shuffle kinds.
  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
                     Type *SubTp = nullptr) const;

  /// \return The expected cost of cast instructions, such as bitcast, trunc,
  /// zext, etc.
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const;

  /// \return The expected cost of a sign- or zero-extended vector extract. Use
  /// -1 to indicate that there is no information about the index value.
  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                               unsigned Index = -1) const;

  /// \return The expected cost of control-flow related instructions such as
  /// Phi, Ret, Br.
  int getCFInstrCost(unsigned Opcode) const;

  /// \returns The expected cost of compare and select instructions.
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                         Type *CondTy = nullptr) const;

  /// \return The expected cost of vector Insert and Extract.
  /// Use -1 to indicate that there is no information on the index value.
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index = -1) const;

  /// \return The cost of Load and Store instructions.
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace) const;

  /// \return The cost of masked Load and Store instructions.
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace) const;

  /// \return The cost of a Gather or Scatter operation.
  /// \p Opcode - the type of memory access (Load or Store)
  /// \p DataTy - a vector type of the data to be loaded or stored
  /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
  /// \p VariableMask - true when the memory access is predicated with a mask
  ///                   that is not a compile-time constant
  /// \p Alignment - alignment of a single element
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
                             bool VariableMask, unsigned Alignment) const;

  /// \return The cost of the interleaved memory operation.
  /// \p Opcode is the memory operation code
  /// \p VecTy is the vector type of the interleaved access.
  /// \p Factor is the interleave factor
  /// \p Indices is the indices for interleaved load members (as an interleaved
  ///    load allows gaps)
  /// \p Alignment is the alignment of the memory operation
  /// \p AddressSpace is the address space of the pointer.
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace) const;

  /// \brief Calculate the cost of performing a vector reduction.
  ///
  /// This is the cost of reducing the vector value of type \p Ty to a scalar
  /// value using the operation denoted by \p Opcode. The form of the reduction
  /// can either be a pairwise reduction or a reduction that splits the vector
  /// at every reduction level.
  ///
  /// Pairwise:
  ///  (v0, v1, v2, v3)
  ///  ((v0+v1), (v2+v3), undef, undef)
  /// Split:
  ///  (v0, v1, v2, v3)
  ///  ((v0+v2), (v1+v3), undef, undef)
  int getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm) const;

  /// \returns The cost of Intrinsic instructions. Types analysis only.
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Type *> Tys, FastMathFlags FMF) const;

  /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args, FastMathFlags FMF) const;

  /// \returns The cost of Call instructions.
  int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) const;

  /// \returns The number of pieces into which the provided type must be
  /// split during legalization. Zero is returned when the answer is unknown.
  unsigned getNumberOfParts(Type *Tp) const;

  /// \returns The cost of the address computation. For most targets this can
  /// be merged into the instruction indexing mode. Some targets might want to
  /// distinguish between address computation for memory operations on vector
  /// types and scalar types. Such targets should override this function.
  /// The 'SE' parameter holds a pointer to the scalar evolution object, which
  /// is used to get the step value of Ptr in the case of a constant stride.
  /// The 'Ptr' parameter holds the SCEV of the access pointer.
  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE = nullptr,
                                const SCEV *Ptr = nullptr) const;

  /// \returns The cost, if any, of keeping values of the given types alive
  /// over a callsite.
  ///
  /// Some types may require the use of register classes that do not have
  /// any callee-saved registers, so would require a spill and fill.
  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;

  /// \returns True if the intrinsic is a supported memory intrinsic. Info
  /// will contain additional information - whether the intrinsic may read
  /// from or write to memory, volatility, and the pointer. Info is undefined
  /// if false is returned.
  bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;

  /// \returns A value which is the result of the given memory intrinsic. New
  /// instructions may be created to extract the result from the given
  /// intrinsic memory operation. Returns nullptr if the target cannot create
  /// a result from the given intrinsic.
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) const;

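  // Illustrative sketch of a target-side implementation; MyTTIImpl and
  // Intrinsic::my_load are hypothetical placeholders:
  //
  //   bool MyTTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
  //                                      MemIntrinsicInfo &Info) {
  //     if (Inst->getIntrinsicID() == Intrinsic::my_load) {
  //       Info.ReadMem = true;
  //       Info.WriteMem = false;
  //       Info.IsSimple = true;
  //       Info.MatchingId = 1; // Pairs with the matching store intrinsic.
  //       Info.NumMemRefs = 1;
  //       Info.PtrVal = Inst->getArgOperand(0);
  //       return true;
  //     }
  //     return false;
  //   }
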
  /// \returns True if the two functions have compatible attributes for
  /// inlining purposes.
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const;

  /// \returns The bitwidth of the largest vector type that should be used to
  /// load/store in the given address space.
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const;

  /// \returns True if the load instruction is legal to vectorize.
  bool isLegalToVectorizeLoad(LoadInst *LI) const;

  /// \returns True if the store instruction is legal to vectorize.
  bool isLegalToVectorizeStore(StoreInst *SI) const;

  /// \returns True if it is legal to vectorize the given load chain.
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const;

  /// \returns True if it is legal to vectorize the given store chain.
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const;

  /// \returns The new vector factor value if the target doesn't support \p
  /// SizeInBytes loads or has a better vector factor.
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const;

  /// \returns The new vector factor value if the target doesn't support \p
  /// SizeInBytes stores or has a better vector factor.
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const;

  /// @}

private:
  /// \brief The abstract base class used to type erase specific TTI
  /// implementations.
  class Concept;

  /// \brief The template model for the base class which wraps a concrete
  /// implementation in a type erased interface.
  template <typename T> class Model;

  std::unique_ptr<Concept> TTIImpl;
};

class TargetTransformInfo::Concept {
public:
  virtual ~Concept() = 0;
  virtual const DataLayout &getDataLayout() const = 0;
  virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
  virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
                         ArrayRef<const Value *> Operands) = 0;
  virtual int getCallCost(FunctionType *FTy, int NumArgs) = 0;
  virtual int getCallCost(const Function *F, int NumArgs) = 0;
  virtual int getCallCost(const Function *F,
                          ArrayRef<const Value *> Arguments) = 0;
  virtual unsigned getInliningThresholdMultiplier() = 0;
  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                               ArrayRef<Type *> ParamTys) = 0;
  virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                               ArrayRef<const Value *> Arguments) = 0;
  virtual int getUserCost(const User *U) = 0;
  virtual bool hasBranchDivergence() = 0;
  virtual bool isSourceOfDivergence(const Value *V) = 0;
  virtual bool isLoweredToCall(const Function *F) = 0;
  virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
  virtual bool isLegalAddImmediate(int64_t Imm) = 0;
  virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale,
                                     unsigned AddrSpace) = 0;
  virtual bool isLegalMaskedStore(Type *DataType) = 0;
  virtual bool isLegalMaskedLoad(Type *DataType) = 0;
  virtual bool isLegalMaskedScatter(Type *DataType) = 0;
  virtual bool isLegalMaskedGather(Type *DataType) = 0;
  virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale, unsigned AddrSpace) = 0;
  virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) = 0;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
  virtual bool isProfitableToHoist(Instruction *I) = 0;
  virtual bool isTypeLegal(Type *Ty) = 0;
  virtual unsigned getJumpBufAlignment() = 0;
  virtual unsigned getJumpBufSize() = 0;
  virtual bool shouldBuildLookupTables() = 0;
  virtual bool shouldBuildLookupTablesForConstant(Constant *C) = 0;
  virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
  virtual bool enableInterleavedAccessVectorization() = 0;
  virtual bool isFPVectorizationPotentiallyUnsafe() = 0;
  virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                              unsigned BitWidth,
                                              unsigned AddressSpace,
                                              unsigned Alignment,
                                              bool *Fast) = 0;
  virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
  virtual bool haveFastSqrt(Type *Ty) = 0;
  virtual int getFPOpCost(Type *Ty) = 0;
  virtual int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx,
                                    const APInt &Imm, Type *Ty) = 0;
  virtual int getIntImmCost(const APInt &Imm, Type *Ty) = 0;
  virtual int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) = 0;
  virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                            Type *Ty) = 0;
  virtual unsigned getNumberOfRegisters(bool Vector) = 0;
  virtual unsigned getRegisterBitWidth(bool Vector) = 0;
  virtual unsigned getCacheLineSize() = 0;
  virtual unsigned getPrefetchDistance() = 0;
  virtual unsigned getMinPrefetchStride() = 0;
  virtual unsigned getMaxPrefetchIterationsAhead() = 0;
  virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
  virtual int
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
                         OperandValueKind Opd2Info,
                         OperandValueProperties Opd1PropInfo,
                         OperandValueProperties Opd2PropInfo) = 0;
  virtual int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                             Type *SubTp) = 0;
  virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) = 0;
  virtual int getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                       VectorType *VecTy, unsigned Index) = 0;
  virtual int getCFInstrCost(unsigned Opcode) = 0;
  virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                 Type *CondTy) = 0;
  virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
                                 unsigned Index) = 0;
  virtual int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                              unsigned AddressSpace) = 0;
  virtual int getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment,
                                    unsigned AddressSpace) = 0;
  virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                     Value *Ptr, bool VariableMask,
                                     unsigned Alignment) = 0;
  virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                         unsigned Factor,
                                         ArrayRef<unsigned> Indices,
                                         unsigned Alignment,
                                         unsigned AddressSpace) = 0;
  virtual int getReductionCost(unsigned Opcode, Type *Ty,
                               bool IsPairwiseForm) = 0;
  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                    ArrayRef<Type *> Tys,
                                    FastMathFlags FMF) = 0;
  virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                                    ArrayRef<Value *> Args,
                                    FastMathFlags FMF) = 0;
  virtual int getCallInstrCost(Function *F, Type *RetTy,
                               ArrayRef<Type *> Tys) = 0;
  virtual unsigned getNumberOfParts(Type *Tp) = 0;
  virtual int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                        const SCEV *Ptr) = 0;
  virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
  virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                                  MemIntrinsicInfo &Info) = 0;
  virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                   Type *ExpectedType) = 0;
  virtual bool areInlineCompatible(const Function *Caller,
                                   const Function *Callee) const = 0;
  virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeLoad(LoadInst *LI) const = 0;
  virtual bool isLegalToVectorizeStore(StoreInst *SI) const = 0;
  virtual bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                           unsigned Alignment,
                                           unsigned AddrSpace) const = 0;
  virtual bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                            unsigned Alignment,
                                            unsigned AddrSpace) const = 0;
  virtual unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                                       unsigned ChainSizeInBytes,
                                       VectorType *VecTy) const = 0;
  virtual unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                        unsigned ChainSizeInBytes,
                                        VectorType *VecTy) const = 0;
};

template <typename T>
class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
  T Impl;

public:
  Model(T Impl) : Impl(std::move(Impl)) {}
  ~Model() override {}

  const DataLayout &getDataLayout() const override {
    return Impl.getDataLayout();
  }

  int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
    return Impl.getOperationCost(Opcode, Ty, OpTy);
  }
  int getGEPCost(Type *PointeeType, const Value *Ptr,
                 ArrayRef<const Value *> Operands) override {
    return Impl.getGEPCost(PointeeType, Ptr, Operands);
  }
  int getCallCost(FunctionType *FTy, int NumArgs) override {
    return Impl.getCallCost(FTy, NumArgs);
  }
  int getCallCost(const Function *F, int NumArgs) override {
    return Impl.getCallCost(F, NumArgs);
  }
  int getCallCost(const Function *F,
                  ArrayRef<const Value *> Arguments) override {
    return Impl.getCallCost(F, Arguments);
  }
  unsigned getInliningThresholdMultiplier() override {
    return Impl.getInliningThresholdMultiplier();
  }
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<Type *> ParamTys) override {
    return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
  }
  int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                       ArrayRef<const Value *> Arguments) override {
    return Impl.getIntrinsicCost(IID, RetTy, Arguments);
  }
  int getUserCost(const User *U) override { return Impl.getUserCost(U); }
  bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
  bool isSourceOfDivergence(const Value *V) override {
    return Impl.isSourceOfDivergence(V);
  }
  bool isLoweredToCall(const Function *F) override {
    return Impl.isLoweredToCall(F);
  }
  void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) override {
    return Impl.getUnrollingPreferences(L, UP);
  }
  bool isLegalAddImmediate(int64_t Imm) override {
    return Impl.isLegalAddImmediate(Imm);
  }
  bool isLegalICmpImmediate(int64_t Imm) override {
    return Impl.isLegalICmpImmediate(Imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) override {
    return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                      Scale, AddrSpace);
  }
  bool isLegalMaskedStore(Type *DataType) override {
    return Impl.isLegalMaskedStore(DataType);
  }
  bool isLegalMaskedLoad(Type *DataType) override {
    return Impl.isLegalMaskedLoad(DataType);
  }
  bool isLegalMaskedScatter(Type *DataType) override {
    return Impl.isLegalMaskedScatter(DataType);
  }
  bool isLegalMaskedGather(Type *DataType) override {
    return Impl.isLegalMaskedGather(DataType);
  }
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale,
                           unsigned AddrSpace) override {
    return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
                                     Scale, AddrSpace);
  }
  bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) override {
    return Impl.isFoldableMemAccessOffset(I, Offset);
  }
  bool isTruncateFree(Type *Ty1, Type *Ty2) override {
    return Impl.isTruncateFree(Ty1, Ty2);
  }
  bool isProfitableToHoist(Instruction *I) override {
    return Impl.isProfitableToHoist(I);
  }
  bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
  unsigned getJumpBufAlignment() override { return Impl.getJumpBufAlignment(); }
  unsigned getJumpBufSize() override { return Impl.getJumpBufSize(); }
  bool shouldBuildLookupTables() override {
    return Impl.shouldBuildLookupTables();
  }
  bool shouldBuildLookupTablesForConstant(Constant *C) override {
    return Impl.shouldBuildLookupTablesForConstant(C);
  }
  bool enableAggressiveInterleaving(bool LoopHasReductions) override {
    return Impl.enableAggressiveInterleaving(LoopHasReductions);
  }
  bool enableInterleavedAccessVectorization() override {
    return Impl.enableInterleavedAccessVectorization();
  }
  bool isFPVectorizationPotentiallyUnsafe() override {
    return Impl.isFPVectorizationPotentiallyUnsafe();
  }
  bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
                                      unsigned BitWidth, unsigned AddressSpace,
                                      unsigned Alignment, bool *Fast) override {
    return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
                                               Alignment, Fast);
  }
  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
    return Impl.getPopcntSupport(IntTyWidthInBit);
  }
  bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }

  int getFPOpCost(Type *Ty) override { return Impl.getFPOpCost(Ty); }

  int getIntImmCodeSizeCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                            Type *Ty) override {
    return Impl.getIntImmCodeSizeCost(Opc, Idx, Imm, Ty);
  }
  int getIntImmCost(const APInt &Imm, Type *Ty) override {
    return Impl.getIntImmCost(Imm, Ty);
  }
  int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
                    Type *Ty) override {
    return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
  }
  int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                    Type *Ty) override {
    return Impl.getIntImmCost(IID, Idx, Imm, Ty);
  }
  unsigned getNumberOfRegisters(bool Vector) override {
    return Impl.getNumberOfRegisters(Vector);
  }
  unsigned getRegisterBitWidth(bool Vector) override {
    return Impl.getRegisterBitWidth(Vector);
  }

  unsigned getCacheLineSize() override {
    return Impl.getCacheLineSize();
  }
  unsigned getPrefetchDistance() override { return Impl.getPrefetchDistance(); }
  unsigned getMinPrefetchStride() override {
    return Impl.getMinPrefetchStride();
  }
  unsigned getMaxPrefetchIterationsAhead() override {
    return Impl.getMaxPrefetchIterationsAhead();
  }
  unsigned getMaxInterleaveFactor(unsigned VF) override {
    return Impl.getMaxInterleaveFactor(VF);
  }
  int
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
                         OperandValueKind Opd2Info,
                         OperandValueProperties Opd1PropInfo,
                         OperandValueProperties Opd2PropInfo) override {
    return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                       Opd1PropInfo, Opd2PropInfo);
  }
  int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                     Type *SubTp) override {
    return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
  }
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) override {
    return Impl.getCastInstrCost(Opcode, Dst, Src);
  }
  int getExtractWithExtendCost(unsigned Opcode, Type *Dst, VectorType *VecTy,
                               unsigned Index) override {
    return Impl.getExtractWithExtendCost(Opcode, Dst, VecTy, Index);
  }
  int getCFInstrCost(unsigned Opcode) override {
    return Impl.getCFInstrCost(Opcode);
  }
  int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) override {
    return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy);
  }
  int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
    return Impl.getVectorInstrCost(Opcode, Val, Index);
  }
  int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                      unsigned AddressSpace) override {
    return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  }
  int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                            unsigned AddressSpace) override {
    return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
  }
  int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                             Value *Ptr, bool VariableMask,
                             unsigned Alignment) override {
    return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment);
  }
  int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                 ArrayRef<unsigned> Indices, unsigned Alignment,
                                 unsigned AddressSpace) override {
    return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
  }
  int getReductionCost(unsigned Opcode, Type *Ty,
                       bool IsPairwiseForm) override {
    return Impl.getReductionCost(Opcode, Ty, IsPairwiseForm);
  }
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, ArrayRef<Type *> Tys,
                            FastMathFlags FMF) override {
    return Impl.getIntrinsicInstrCost(ID, RetTy, Tys, FMF);
  }
  int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            ArrayRef<Value *> Args,
                            FastMathFlags FMF) override {
    return Impl.getIntrinsicInstrCost(ID, RetTy, Args, FMF);
  }
  int getCallInstrCost(Function *F, Type *RetTy,
                       ArrayRef<Type *> Tys) override {
    return Impl.getCallInstrCost(F, RetTy, Tys);
  }
  unsigned getNumberOfParts(Type *Tp) override {
    return Impl.getNumberOfParts(Tp);
  }
  int getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                const SCEV *Ptr) override {
    return Impl.getAddressComputationCost(Ty, SE, Ptr);
  }
  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
    return Impl.getCostOfKeepingLiveOverCall(Tys);
  }
  bool getTgtMemIntrinsic(IntrinsicInst *Inst,
                          MemIntrinsicInfo &Info) override {
    return Impl.getTgtMemIntrinsic(Inst, Info);
  }
  Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                           Type *ExpectedType) override {
    return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
  }
  bool areInlineCompatible(const Function *Caller,
                           const Function *Callee) const override {
    return Impl.areInlineCompatible(Caller, Callee);
  }
  unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const override {
    return Impl.getLoadStoreVecRegBitWidth(AddrSpace);
  }
  bool isLegalToVectorizeLoad(LoadInst *LI) const override {
    return Impl.isLegalToVectorizeLoad(LI);
  }
  bool isLegalToVectorizeStore(StoreInst *SI) const override {
    return Impl.isLegalToVectorizeStore(SI);
  }
  bool isLegalToVectorizeLoadChain(unsigned ChainSizeInBytes,
                                   unsigned Alignment,
                                   unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeLoadChain(ChainSizeInBytes, Alignment,
                                            AddrSpace);
  }
  bool isLegalToVectorizeStoreChain(unsigned ChainSizeInBytes,
                                    unsigned Alignment,
                                    unsigned AddrSpace) const override {
    return Impl.isLegalToVectorizeStoreChain(ChainSizeInBytes, Alignment,
                                             AddrSpace);
  }
  unsigned getLoadVectorFactor(unsigned VF, unsigned LoadSize,
                               unsigned ChainSizeInBytes,
                               VectorType *VecTy) const override {
    return Impl.getLoadVectorFactor(VF, LoadSize, ChainSizeInBytes, VecTy);
  }
  unsigned getStoreVectorFactor(unsigned VF, unsigned StoreSize,
                                unsigned ChainSizeInBytes,
                                VectorType *VecTy) const override {
    return Impl.getStoreVectorFactor(VF, StoreSize, ChainSizeInBytes, VecTy);
  }
};

template <typename T>
TargetTransformInfo::TargetTransformInfo(T Impl)
    : TTIImpl(new Model<T>(Impl)) {}

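// Illustrative sketch: a target typically hands a concrete implementation
// object to this constructor; MyTTIImpl and MyTargetMachine are hypothetical
// stand-ins for a target's classes.
//
//   TargetTransformInfo MyTargetMachine::getTTI(const Function &F) {
//     return TargetTransformInfo(MyTTIImpl(this, F));
//   }
//
// The Model<MyTTIImpl> instantiation above then type-erases the
// implementation behind the Concept interface.
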
/// \brief Analysis pass providing the \c TargetTransformInfo.
///
/// The core idea of the TargetIRAnalysis is to expose an interface through
/// which LLVM targets can analyze and provide information about the middle
/// end's target-independent IR. This supports use cases such as target-aware
/// cost modeling of IR constructs.
///
/// This is a function analysis because much of the cost modeling for targets
/// is done in a subtarget specific way and LLVM supports compiling different
/// functions targeting different subtargets in order to support runtime
/// dispatch according to the observed subtarget.
class TargetIRAnalysis : public AnalysisInfoMixin<TargetIRAnalysis> {
public:
  typedef TargetTransformInfo Result;

  /// \brief Default construct a target IR analysis.
  ///
  /// This will use the module's datalayout to construct a baseline
  /// conservative TTI result.
  TargetIRAnalysis();

  /// \brief Construct an IR analysis pass around a target-provided callback.
  ///
  /// The callback will be called with a particular function for which the TTI
  /// is needed and must return a TTI object for that function.
  TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);

  // Value semantics. We spell out the constructors for MSVC.
  TargetIRAnalysis(const TargetIRAnalysis &Arg)
      : TTICallback(Arg.TTICallback) {}
  TargetIRAnalysis(TargetIRAnalysis &&Arg)
      : TTICallback(std::move(Arg.TTICallback)) {}
  TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
    TTICallback = RHS.TTICallback;
    return *this;
  }
  TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
    TTICallback = std::move(RHS.TTICallback);
    return *this;
  }

  Result run(const Function &F, FunctionAnalysisManager &);

private:
  friend AnalysisInfoMixin<TargetIRAnalysis>;
  static AnalysisKey Key;

  /// \brief The callback used to produce a result.
  ///
  /// We use a completely opaque callback so that targets can provide whatever
  /// mechanism they desire for constructing the TTI for a given function.
  ///
  /// FIXME: Should we really use std::function? It's relatively inefficient.
  /// It might be possible to arrange for even stateful callbacks to outlive
  /// the analysis and thus use a function_ref which would be lighter weight.
  /// This may also be less error prone as the callback is likely to reference
  /// the external TargetMachine, and that reference needs to never dangle.
  std::function<Result(const Function &)> TTICallback;

  /// \brief Helper function used as the callback in the default constructor.
  static Result getDefaultTTI(const Function &F);
};

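// Illustrative sketch of new pass manager usage: the analysis is registered
// on a FunctionAnalysisManager and queried from a pass. The surrounding
// setup is elided.
//
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return TargetIRAnalysis(); }); // baseline TTI
//   ...
//   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
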
/// \brief Wrapper pass for TargetTransformInfo.
///
/// This pass can be constructed from a TTI object which it stores internally
/// and is queried by passes.
class TargetTransformInfoWrapperPass : public ImmutablePass {
  TargetIRAnalysis TIRA;
  Optional<TargetTransformInfo> TTI;

  virtual void anchor();

public:
  static char ID;

  /// \brief We must provide a default constructor for the pass but it should
  /// never be used.
  ///
  /// Use the constructor below or call one of the creation routines.
  TargetTransformInfoWrapperPass();

  explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

  TargetTransformInfo &getTTI(const Function &F);
};

/// \brief Create an analysis pass wrapper around a TTI object.
///
/// This analysis pass just holds the TTI instance and makes it available to
/// clients.
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);

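// Illustrative sketch of legacy pass manager usage; MyPass is a hypothetical
// function pass that depends on the wrapper pass:
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<TargetTransformInfoWrapperPass>();
//   }
//
//   bool MyPass::runOnFunction(Function &F) {
//     TargetTransformInfo &TTI =
//         getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//     ...
//   }
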
} // End llvm namespace

#endif