1 //===-- AArch64Subtarget.cpp - AArch64 Subtarget Information ----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the AArch64 specific subclass of TargetSubtarget.
12 //===----------------------------------------------------------------------===//
14 #include "AArch64Subtarget.h"
17 #include "AArch64InstrInfo.h"
18 #include "AArch64PBQPRegAlloc.h"
19 #include "AArch64TargetMachine.h"
21 #include "AArch64CallLowering.h"
22 #include "AArch64LegalizerInfo.h"
23 #include "AArch64RegisterBankInfo.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
25 #include "llvm/CodeGen/MachineScheduler.h"
26 #include "llvm/IR/GlobalValue.h"
27 #include "llvm/Support/TargetParser.h"
31 #define DEBUG_TYPE "aarch64-subtarget"
33 #define GET_SUBTARGETINFO_CTOR
34 #define GET_SUBTARGETINFO_TARGET_DESC
35 #include "AArch64GenSubtargetInfo.inc"
// Escape hatch for the early if-conversion pass; enabled by default and
// consulted by enableEarlyIfConversion() below.
38 EnableEarlyIfConvert("aarch64-early-ifcvt", cl::desc("Enable the early if "
39 "converter pass"), cl::init(true), cl::Hidden);
41 // If OS supports TBI, use this flag to enable it.
43 UseAddressTopByteIgnored("aarch64-use-tbi", cl::desc("Assume that top byte of "
44 "an address is ignored"), cl::init(false), cl::Hidden);
// When set, calls to functions marked nonlazybind are routed through the
// GOT (consumed in classifyGlobalFunctionReference); off by default.
47 UseNonLazyBind("aarch64-enable-nonlazybind",
48 cl::desc("Call nonlazybind functions via direct GOT load"),
49 cl::init(false), cl::Hidden);
// Parse the feature string and CPU name, falling back to the "generic"
// CPU when none is supplied, then derive the per-CPU tuning properties
// via initializeProperties(). Called from the constructor's initializer
// list so the parsed features are available to the other members.
52 AArch64Subtarget::initializeSubtargetDependencies(StringRef FS,
53 StringRef CPUString) {
54 // Determine default and user-specified characteristics
56 if (CPUString.empty())
57 CPUString = "generic";
59 ParseSubtargetFeatures(CPUString, FS);
60 initializeProperties();
// Set per-CPU-family tuning knobs (prefetching, interleaving, preferred
// alignments, jump-table size, minimum vector register width) based on
// the ARMProcFamily determined by ParseSubtargetFeatures.
65 void AArch64Subtarget::initializeProperties() {
66 // Initialize CPU specific properties. We should add a tablegen feature for
67 // this in the future so we can specify it together with the subtarget
69 switch (ARMProcFamily) {
// NOTE(review): the per-family case labels are elided in this view; each
// group of assignments below tunes one processor family — confirm the
// labels against the full file before relying on them.
72 PrefetchDistance = 280;
73 MinPrefetchStride = 2048;
74 MaxPrefetchIterationsAhead = 3;
77 MaxInterleaveFactor = 4;
78 PrefFunctionAlignment = 4;
81 MaxInterleaveFactor = 4;
83 PrefFunctionAlignment = 4;
84 PrefLoopAlignment = 3;
87 MaxInterleaveFactor = 4;
88 MaxJumpTableSize = 20;
89 PrefFunctionAlignment = 5;
90 PrefLoopAlignment = 4;
93 MaxInterleaveFactor = 4;
94 // FIXME: remove this to enable 64-bit SLP if performance looks good.
95 MinVectorRegisterBitWidth = 128;
97 PrefetchDistance = 820;
98 MinPrefetchStride = 2048;
99 MaxPrefetchIterationsAhead = 8;
102 MaxInterleaveFactor = 4;
103 // FIXME: remove this to enable 64-bit SLP if performance looks good.
104 MinVectorRegisterBitWidth = 128;
107 MaxInterleaveFactor = 4;
108 VectorInsertExtractBaseCost = 2;
110 PrefetchDistance = 740;
111 MinPrefetchStride = 1024;
112 MaxPrefetchIterationsAhead = 11;
113 // FIXME: remove this to enable 64-bit SLP if performance looks good.
114 MinVectorRegisterBitWidth = 128;
118 PrefFunctionAlignment = 3;
119 PrefLoopAlignment = 2;
120 MaxInterleaveFactor = 4;
121 PrefetchDistance = 128;
122 MinPrefetchStride = 1024;
123 MaxPrefetchIterationsAhead = 4;
124 // FIXME: remove this to enable 64-bit SLP if performance looks good.
125 MinVectorRegisterBitWidth = 128;
132 PrefFunctionAlignment = 3;
133 PrefLoopAlignment = 2;
134 // FIXME: remove this to enable 64-bit SLP if performance looks good.
135 MinVectorRegisterBitWidth = 128;
137 case CortexA35: break;
139 PrefFunctionAlignment = 3;
141 case CortexA55: break;
145 PrefFunctionAlignment = 4;
// Construct the subtarget: parse the CPU/feature strings (via
// initializeSubtargetDependencies in the initializer list, before members
// that depend on them), then create the GlobalISel components: call
// lowering, legalizer, register-bank info and instruction selector.
151 AArch64Subtarget::AArch64Subtarget(const Triple &TT, const std::string &CPU,
152 const std::string &FS,
153 const TargetMachine &TM, bool LittleEndian)
154 : AArch64GenSubtargetInfo(TT, CPU, FS),
155 ReserveX18(AArch64::isX18ReservedByDefault(TT)), IsLittle(LittleEndian),
156 TargetTriple(TT), FrameLowering(),
157 InstrInfo(initializeSubtargetDependencies(FS, CPU)), TSInfo(),
159 CallLoweringInfo.reset(new AArch64CallLowering(*getTargetLowering()));
160 Legalizer.reset(new AArch64LegalizerInfo(*this));
// RBI is handed (by reference) to the instruction selector and then its
// ownership is transferred to RegBankInfo below.
162 auto *RBI = new AArch64RegisterBankInfo(*getRegisterInfo());
164 // FIXME: At this point, we can't rely on Subtarget having RBI.
165 // It's awkward to mix passing RBI and the Subtarget; should we pass
167 InstSelector.reset(createAArch64InstructionSelector(
168 *static_cast<const AArch64TargetMachine *>(&TM), *this, *RBI));
170 RegBankInfo.reset(RBI);
// GlobalISel hook: return the call-lowering object built in the constructor.
173 const CallLowering *AArch64Subtarget::getCallLowering() const {
174 return CallLoweringInfo.get();
// GlobalISel hook: return the instruction selector built in the constructor.
177 const InstructionSelector *AArch64Subtarget::getInstructionSelector() const {
178 return InstSelector.get();
// GlobalISel hook: return the legalizer info built in the constructor.
181 const LegalizerInfo *AArch64Subtarget::getLegalizerInfo() const {
182 return Legalizer.get();
// GlobalISel hook: return the register-bank info built in the constructor.
185 const RegisterBankInfo *AArch64Subtarget::getRegBankInfo() const {
186 return RegBankInfo.get();
189 /// Find the target operand flags that describe how a global value should be
190 /// referenced for the current subtarget.
192 AArch64Subtarget::ClassifyGlobalReference(const GlobalValue *GV,
193 const TargetMachine &TM) const {
194 // MachO large model always goes via a GOT, simply to get a single 8-byte
195 // absolute relocation on all global addresses.
196 if (TM.getCodeModel() == CodeModel::Large && isTargetMachO())
197 return AArch64II::MO_GOT;
// Seed the flags with MO_DLLIMPORT so it is preserved on any GOT access
// chosen below.
199 unsigned Flags = GV->hasDLLImportStorageClass() ? AArch64II::MO_DLLIMPORT
200 : AArch64II::MO_NO_FLAG;
// Anything not provably local to this module must be reached via the GOT.
202 if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
203 return AArch64II::MO_GOT | Flags;
205 // The small code model's direct accesses use ADRP, which cannot
206 // necessarily produce the value 0 (if the code is above 4GB).
207 if (useSmallAddressing() && GV->hasExternalWeakLinkage())
208 return AArch64II::MO_GOT | Flags;
// Classify how a function symbol should be referenced for calls: via the
// GOT (MachO large model, or nonlazybind functions that are not known to
// be DSO-local) or directly (MO_NO_FLAG).
213 unsigned char AArch64Subtarget::classifyGlobalFunctionReference(
214 const GlobalValue *GV, const TargetMachine &TM) const {
215 // MachO large model always goes via a GOT, because we don't have the
216 // relocations available to do anything else.
217 if (TM.getCodeModel() == CodeModel::Large && isTargetMachO() &&
218 !GV->hasInternalLinkage())
219 return AArch64II::MO_GOT;
221 // NonLazyBind goes via GOT unless we know it's available locally.
222 auto *F = dyn_cast<Function>(GV);
223 if (UseNonLazyBind && F && F->hasFnAttribute(Attribute::NonLazyBind) &&
224 !TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
225 return AArch64II::MO_GOT;
// Default: a direct reference is fine.
227 return AArch64II::MO_NO_FLAG;
// Adjust the MachineScheduler policy for AArch64: enable bi-directional
// scheduling and optionally disable the latency heuristic.
230 void AArch64Subtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
231 unsigned NumRegionInstrs) const {
232 // LNT run (at least on Cyclone) showed reasonably significant gains for
233 // bi-directional scheduling. 253.perlbmk.
234 Policy.OnlyTopDown = false;
235 Policy.OnlyBottomUp = false;
236 // Enabling or Disabling the latency heuristic is a close call: It seems to
237 // help nearly no benchmark on out-of-order architectures, on the other hand
238 // it regresses register pressure on a few benchmarking.
239 Policy.DisableLatencyHeuristic = DisableLatencySchedHeuristic;
// Gate the early if-conversion pass on the -aarch64-early-ifcvt option
// declared above (defaults to enabled).
242 bool AArch64Subtarget::enableEarlyIfConversion() const {
243 return EnableEarlyIfConvert;
// Whether we may assume the OS ignores the top byte of addresses (TBI).
// Only considered at all when the -aarch64-use-tbi flag is set.
246 bool AArch64Subtarget::supportsAddressTopByteIgnored() const {
247 if (!UseAddressTopByteIgnored)
// On iOS the answer depends on the OS version.
250 if (TargetTriple.isiOS()) {
251 unsigned Major, Minor, Micro;
// NOTE(review): the comparison against the extracted version and the
// function's final return are not visible in this chunk — confirm the
// minimum-version threshold against the full file.
252 TargetTriple.getiOSVersion(Major, Minor, Micro);
// Provide the A57 FP-op chaining constraint to the PBQP register allocator
// when this subtarget wants balanced FP ops; otherwise no custom constraint.
259 std::unique_ptr<PBQPRAConstraint>
260 AArch64Subtarget::getCustomPBQPConstraints() const {
261 return balanceFPOps() ? llvm::make_unique<A57ChainingConstraint>() : nullptr;
// Post-load hook for .mir files: fill in the max call frame size if the
// file did not specify it.
264 void AArch64Subtarget::mirFileLoaded(MachineFunction &MF) const {
265 // We usually compute max call frame size after ISel. Do the computation now
266 // if the .mir file didn't specify it. Note that this will probably give you
267 // bogus values after PEI has eliminated the callframe setup/destroy pseudo
268 // instructions, specify explicitly if you need it to be correct.
269 MachineFrameInfo &MFI = MF.getFrameInfo();
270 if (!MFI.isMaxCallFrameSizeComputed())
271 MFI.computeMaxCallFrameSize(MF);