//===- ARMTargetTransformInfo.h - ARM specific TTI --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file describes a TargetTransformInfo::Concept conforming object
/// specific to the ARM target machine. It uses the target's detailed
/// information to provide more precise answers to certain TTI queries, while
/// letting the target independent and default TTI implementations handle the
/// rest.
///
//===----------------------------------------------------------------------===//
17 #ifndef LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
18 #define LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H
21 #include "ARMSubtarget.h"
22 #include "ARMTargetMachine.h"
23 #include "llvm/ADT/ArrayRef.h"
24 #include "llvm/Analysis/TargetTransformInfo.h"
25 #include "llvm/CodeGen/BasicTTIImpl.h"
26 #include "llvm/IR/Constant.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/MC/SubtargetFeature.h"
33 class ARMTargetLowering;
37 class ScalarEvolution;
41 class ARMTTIImpl : public BasicTTIImplBase<ARMTTIImpl> {
// NOTE(review): every line of this excerpt carries a stray leading numeral
// (e.g. "41 ") left over from a line-numbered extraction, and jumps in that
// numbering show that several original lines are missing (access specifiers,
// some returns, closing braces). Restore from the pristine header before
// compiling; the comments below annotate only what is visible here.
// CRTP base: BasicTTIImplBase<ARMTTIImpl> calls back into this class (via
// getST()/getTLI() below) so the shared cost-model code gets ARM answers.
42 using BaseT = BasicTTIImplBase<ARMTTIImpl>;
43 using TTI = TargetTransformInfo;
// Per-function ARM subtarget and its lowering info; both are set once by the
// constructor and never change afterwards.
47 const ARMSubtarget *ST;
48 const ARMTargetLowering *TLI;
50 // Currently the following features are excluded from InlineFeatureWhitelist.
51 // ModeThumb, FeatureNoARM, ModeSoftFloat, FeatureFP64, FeatureD32
52 // Depending on whether they are set or unset, different
53 // instructions/registers are available. For example, inlining a callee with
54 // -thumb-mode in a caller with +thumb-mode, may cause the assembler to
55 // fail if the callee uses ARM only instructions, e.g. in inline asm.
// Subtarget features that are allowed to differ between caller and callee
// without blocking inlining (presumably consulted by areInlineCompatible(),
// declared below — its definition is out of line; verify there).
56 const FeatureBitset InlineFeatureWhitelist = {
57 ARM::FeatureVFP2, ARM::FeatureVFP3, ARM::FeatureNEON, ARM::FeatureThumb2,
58 ARM::FeatureFP16, ARM::FeatureVFP4, ARM::FeatureFPARMv8,
59 ARM::FeatureFullFP16, ARM::FeatureFP16FML, ARM::FeatureHWDivThumb,
60 ARM::FeatureHWDivARM, ARM::FeatureDB, ARM::FeatureV7Clrex,
61 ARM::FeatureAcquireRelease, ARM::FeatureSlowFPBrcc,
62 ARM::FeaturePerfMon, ARM::FeatureTrustZone, ARM::Feature8MSecExt,
63 ARM::FeatureCrypto, ARM::FeatureCRC, ARM::FeatureRAS,
64 ARM::FeatureFPAO, ARM::FeatureFuseAES, ARM::FeatureZCZeroing,
65 ARM::FeatureProfUnpredicate, ARM::FeatureSlowVGETLNi32,
66 ARM::FeatureSlowVDUP32, ARM::FeaturePreferVMOVSR,
67 ARM::FeaturePrefISHSTBarrier, ARM::FeatureMuxedUnits,
68 ARM::FeatureSlowOddRegister, ARM::FeatureSlowLoadDSubreg,
69 ARM::FeatureDontWidenVMOVS, ARM::FeatureExpandMLx,
70 ARM::FeatureHasVMLxHazards, ARM::FeatureNEONForFPMovs,
71 ARM::FeatureNEONForFP, ARM::FeatureCheckVLDnAlign,
72 ARM::FeatureHasSlowFPVMLx, ARM::FeatureHasSlowFPVFMx,
73 ARM::FeatureVMLxForwarding, ARM::FeaturePref32BitThumb,
74 ARM::FeatureAvoidPartialCPSR, ARM::FeatureCheapPredicableCPSR,
75 ARM::FeatureAvoidMOVsShOp, ARM::FeatureHasRetAddrStack,
76 ARM::FeatureHasNoBranchPredictor, ARM::FeatureDSP, ARM::FeatureMP,
77 ARM::FeatureVirtualization, ARM::FeatureMClass, ARM::FeatureRClass,
78 ARM::FeatureAClass, ARM::FeatureNaClTrap, ARM::FeatureStrictAlign,
79 ARM::FeatureLongCalls, ARM::FeatureExecuteOnly, ARM::FeatureReserveR9,
80 ARM::FeatureNoMovt, ARM::FeatureNoNegativeImmediates
// NOTE(review): the closing "};" of this initializer list is not present in
// this excerpt (the embedded numbering jumps from 80 to 83) — restore it from
// the original file.
// Accessors required by the BasicTTIImplBase CRTP contract: expose the
// cached subtarget and target-lowering objects to the shared base code.
83 const ARMSubtarget *getST() const { return ST; }
84 const ARMTargetLowering *getTLI() const { return TLI; }
// Build the TTI for one function: the subtarget is resolved per-function
// (target attributes can differ between functions in a module), and TLI is
// taken from that subtarget.
87 explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)
88 : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
89 TLI(ST->getTargetLowering()) {}
// Whether Callee may be inlined into Caller despite differing subtarget
// features; defined out of line (presumably checks InlineFeatureWhitelist
// above — confirm in the .cpp).
91 bool areInlineCompatible(const Function *Caller,
92 const Function *Callee) const;
// Interleaved-access vectorization is unconditionally enabled on ARM.
94 bool enableInterleavedAccessVectorization() { return true; }
// Favor backedge-indexed addressing only for single-block loops on Thumb2
// M-class cores, and never when the enclosing function is optimized for size.
96 bool shouldFavorBackedgeIndex(const Loop *L) const {
97 if (L->getHeader()->getParent()->hasOptSize())
// NOTE(review): the return statement taken on the optsize path and this
// function's closing brace are missing from this excerpt (embedded numbering
// jumps 97 -> 99 and 99 -> 102) — restore from the original file.
99 return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
102 /// Floating-point computation using ARMv8 AArch32 Advanced
103 /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
104 /// and Arm MVE are IEEE-754 compliant.
// FP vectorization is flagged potentially unsafe except on Darwin targets or
// when MVE floating point (IEEE-754 compliant) is available.
105 bool isFPVectorizationPotentiallyUnsafe() {
106 return !ST->isTargetDarwin() && !ST->hasMVEFloatOps();
// NOTE(review): this function's closing brace is missing from this excerpt.
109 /// \name Scalar TTI Implementations
// Code-size cost of materializing immediate Imm as operand Idx of Opcode;
// defined out of line.
112 int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
// NOTE(review): the rest of this declaration's parameter list (embedded
// numbering jumps 112 -> 115) is missing from this excerpt.
// Pull in the base-class overloads that are not overridden here.
115 using BaseT::getIntImmCost;
// Cost of materializing the integer immediate Imm of type Ty.
116 int getIntImmCost(const APInt &Imm, Type *Ty);
// Cost of Imm when it appears as operand Idx of an instruction with the
// given Opcode (some ARM instructions encode certain immediates for free).
118 int getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
122 /// \name Vector TTI Implementations
// Number of registers available in the given class; by convention here
// ClassID == 1 selects the vector register class.
125 unsigned getNumberOfRegisters(unsigned ClassID) const {
126 bool Vector = (ClassID == 1);
// NOTE(review): the branch bodies and returns of this function are missing
// from this excerpt (embedded numbering jumps 126 -> 130 -> 135 -> 140);
// only the subtarget conditions are visible. Restore before compiling.
130 if (ST->hasMVEIntegerOps())
135 if (ST->isThumb1Only())
// Bit width of a register; the MVE-dependent returns are likewise missing
// from this excerpt (numbering jumps 144 -> 152).
140 unsigned getRegisterBitWidth(bool Vector) const {
144 if (ST->hasMVEIntegerOps())
// Maximum interleave factor is delegated directly to the subtarget.
152 unsigned getMaxInterleaveFactor(unsigned VF) {
153 return ST->getMaxInterleaveFactor();
// Masked-load legality is decided out of line (see the .cpp).
156 bool isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment);
// A masked store is legal exactly when the corresponding masked load is.
158 bool isLegalMaskedStore(Type *DataTy, MaybeAlign Alignment) {
159 return isLegalMaskedLoad(DataTy, Alignment);
// Gather legality is decided out of line; scatters are never legal here.
162 bool isLegalMaskedGather(Type *Ty, MaybeAlign Alignment);
164 bool isLegalMaskedScatter(Type *Ty, MaybeAlign Alignment) { return false; }
// Cost of an IR memcpy instruction; defined out of line.
166 int getMemcpyCost(const Instruction *I);
// Cost of a vector shuffle of kind Kind on type Tp.
168 int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
// Whether a reduction of Opcode over Ty should use a target intrinsic.
170 bool useReductionIntrinsic(unsigned Opcode, Type *Ty,
171 TTI::ReductionFlags Flags) const;
// Whether the given reduction intrinsic should be expanded.
173 bool shouldExpandReduction(const IntrinsicInst *II) const {
// NOTE(review): this function's body and closing brace are missing from this
// excerpt (embedded numbering jumps 173 -> 177) — restore from the original.
// Cost of a cast (Opcode) from Src to Dst; I is the context instruction.
177 int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
178 const Instruction *I = nullptr);
// Cost of a compare or select on ValTy (CondTy for the select condition).
180 int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
181 const Instruction *I = nullptr);
// Cost of extracting/inserting lane Index of vector type Val.
183 int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
// Cost of address computation for an access of type Val.
185 int getAddressComputationCost(Type *Val, ScalarEvolution *SE,
// NOTE(review): this declaration's final parameter is missing from this
// excerpt (embedded numbering jumps 185 -> 188).
// Cost of a (possibly vector) arithmetic instruction, with operand
// value/property hints and optional context.
188 int getArithmeticInstrCost(
189 unsigned Opcode, Type *Ty,
190 TTI::OperandValueKind Op1Info = TTI::OK_AnyValue,
191 TTI::OperandValueKind Op2Info = TTI::OK_AnyValue,
192 TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
193 TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
194 ArrayRef<const Value *> Args = ArrayRef<const Value *>(),
195 const Instruction *CxtI = nullptr);
// Cost of a plain load/store of type Src with the given alignment.
197 int getMemoryOpCost(unsigned Opcode, Type *Src, MaybeAlign Alignment,
198 unsigned AddressSpace, const Instruction *I = nullptr);
// Cost of an interleaved (VLDn/VSTn-style grouping) memory operation,
// optionally masked for predication and/or gaps.
200 int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
201 ArrayRef<unsigned> Indices, unsigned Alignment,
202 unsigned AddressSpace,
203 bool UseMaskForCond = false,
204 bool UseMaskForGaps = false);
// Whether a call to F actually lowers to a call instruction.
206 bool isLoweredToCall(const Function *F);
// Whether converting L to a hardware loop is profitable; fills HWLoopInfo.
207 bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
// NOTE(review): one or more parameters of this declaration are missing from
// this excerpt (embedded numbering jumps 207 -> 209).
209 TargetLibraryInfo *LibInfo,
210 HardwareLoopInfo &HWLoopInfo);
// Whether tail-folding (predicating the loop body) is preferred over an
// epilogue loop when vectorizing L.
211 bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
// NOTE(review): several parameters of this declaration are missing from this
// excerpt (embedded numbering jumps 211 -> 214 -> 216).
214 TargetLibraryInfo *TLI,
216 const LoopAccessInfo *LAI);
// Tune unrolling parameters (UP) for loop L; defined out of line.
217 void getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
218 TTI::UnrollingPreferences &UP);
// Switch-to-lookup-table conversion is blocked for values needing relocation
// under ROPI/RWPI, since constant data cannot hold such pointers there.
220 bool shouldBuildLookupTablesForConstant(Constant *C) const {
221 // In the ROPI and RWPI relocation models we can't have pointers to global
222 // variables or functions in constant data, so don't convert switches to
223 // lookup tables if any of the values would need relocation.
224 if (ST->isROPI() || ST->isRWPI())
225 return !C->needsRelocation();
// NOTE(review): the non-ROPI/RWPI fallthrough return, this function's closing
// brace, and the class's closing "};" are all missing from this excerpt
// (embedded numbering jumps 225 -> 232) — restore from the original file.
232 } // end namespace llvm
234 #endif // LLVM_LIB_TARGET_ARM_ARMTARGETTRANSFORMINFO_H