//===-- X86Subtarget.cpp - X86 Subtarget Information ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86 specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//
16 #include "X86CallLowering.h"
17 #include "X86LegalizerInfo.h"
18 #include "X86RegisterBankInfo.h"
19 #include "X86Subtarget.h"
20 #include "MCTargetDesc/X86BaseInfo.h"
21 #include "X86TargetMachine.h"
22 #include "llvm/ADT/Triple.h"
23 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
25 #include "llvm/IR/Attributes.h"
26 #include "llvm/IR/ConstantRange.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/GlobalValue.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/CodeGen.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Target/TargetMachine.h"
43 #define DEBUG_TYPE "subtarget"
45 #define GET_SUBTARGETINFO_TARGET_DESC
46 #define GET_SUBTARGETINFO_CTOR
47 #include "X86GenSubtargetInfo.inc"
49 // Temporary option to control early if-conversion for x86 while adding machine
52 X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
53 cl::desc("Enable early if-conversion on X86"));
56 /// Classify a blockaddress reference for the current subtarget according to how
57 /// we should reference it in a non-pcrel context.
58 unsigned char X86Subtarget::classifyBlockAddressReference() const {
59 return classifyLocalReference(nullptr);
62 /// Classify a global variable reference for the current subtarget according to
63 /// how we should reference it in a non-pcrel context.
65 X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
66 return classifyGlobalReference(GV, *GV->getParent());
70 X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
71 // If we're not PIC, it's not very interesting.
72 if (!isPositionIndependent())
73 return X86II::MO_NO_FLAG;
76 // 64-bit ELF PIC local references may use GOTOFF relocations.
78 switch (TM.getCodeModel()) {
79 // 64-bit small code model is simple: All rip-relative.
81 llvm_unreachable("Tiny codesize model not supported on X86");
82 case CodeModel::Small:
83 case CodeModel::Kernel:
84 return X86II::MO_NO_FLAG;
86 // The large PIC code model uses GOTOFF.
87 case CodeModel::Large:
88 return X86II::MO_GOTOFF;
90 // Medium is a hybrid: RIP-rel for code, GOTOFF for DSO local data.
91 case CodeModel::Medium:
92 if (isa<Function>(GV))
93 return X86II::MO_NO_FLAG; // All code is RIP-relative
94 return X86II::MO_GOTOFF; // Local symbols use GOTOFF.
96 llvm_unreachable("invalid code model");
99 // Otherwise, this is either a RIP-relative reference or a 64-bit movabsq,
100 // both of which use MO_NO_FLAG.
101 return X86II::MO_NO_FLAG;
104 // The COFF dynamic linker just patches the executable sections.
106 return X86II::MO_NO_FLAG;
108 if (isTargetDarwin()) {
109 // 32 bit macho has no relocation for a-b if a is undefined, even if
110 // b is in the section that is being relocated.
111 // This means we have to use o load even for GVs that are known to be
113 if (GV && (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
114 return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
116 return X86II::MO_PIC_BASE_OFFSET;
119 return X86II::MO_GOTOFF;
122 unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
123 const Module &M) const {
124 // The static large model never uses stubs.
125 if (TM.getCodeModel() == CodeModel::Large && !isPositionIndependent())
126 return X86II::MO_NO_FLAG;
128 // Absolute symbols can be referenced directly.
130 if (Optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) {
131 // See if we can use the 8-bit immediate form. Note that some instructions
132 // will sign extend the immediate operand, so to be conservative we only
133 // accept the range [0,128).
134 if (CR->getUnsignedMax().ult(128))
135 return X86II::MO_ABS8;
137 return X86II::MO_NO_FLAG;
141 if (TM.shouldAssumeDSOLocal(M, GV))
142 return classifyLocalReference(GV);
144 if (isTargetCOFF()) {
145 if (GV->hasDLLImportStorageClass())
146 return X86II::MO_DLLIMPORT;
147 return X86II::MO_COFFSTUB;
151 // ELF supports a large, truly PIC code model with non-PC relative GOT
152 // references. Other object file formats do not. Use the no-flag, 64-bit
153 // reference for them.
154 if (TM.getCodeModel() == CodeModel::Large)
155 return isTargetELF() ? X86II::MO_GOT : X86II::MO_NO_FLAG;
156 return X86II::MO_GOTPCREL;
159 if (isTargetDarwin()) {
160 if (!isPositionIndependent())
161 return X86II::MO_DARWIN_NONLAZY;
162 return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
165 return X86II::MO_GOT;
169 X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
170 return classifyGlobalFunctionReference(GV, *GV->getParent());
174 X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
175 const Module &M) const {
176 if (TM.shouldAssumeDSOLocal(M, GV))
177 return X86II::MO_NO_FLAG;
179 if (isTargetCOFF()) {
180 assert(GV->hasDLLImportStorageClass() &&
181 "shouldAssumeDSOLocal gave inconsistent answer");
182 return X86II::MO_DLLIMPORT;
185 const Function *F = dyn_cast_or_null<Function>(GV);
188 if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
189 // According to psABI, PLT stub clobbers XMM8-XMM15.
190 // In Regcall calling convention those registers are used for passing
191 // parameters. Thus we need to prevent lazy binding in Regcall.
192 return X86II::MO_GOTPCREL;
193 // If PLT must be avoided then the call should be via GOTPCREL.
194 if (((F && F->hasFnAttribute(Attribute::NonLazyBind)) ||
195 (!F && M.getRtLibUseGOT())) &&
197 return X86II::MO_GOTPCREL;
198 return X86II::MO_PLT;
202 if (F && F->hasFnAttribute(Attribute::NonLazyBind))
203 // If the function is marked as non-lazy, generate an indirect call
204 // which loads from the GOT directly. This avoids runtime overhead
205 // at the cost of eager binding (and one extra byte of encoding).
206 return X86II::MO_GOTPCREL;
207 return X86II::MO_NO_FLAG;
210 return X86II::MO_NO_FLAG;
213 /// Return true if the subtarget allows calls to immediate address.
214 bool X86Subtarget::isLegalToCallImmediateAddr() const {
215 // FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
216 // but WinCOFFObjectWriter::RecordRelocation cannot emit them. Once it does,
217 // the following check for Win32 should be removed.
218 if (In64BitMode || isTargetWin32())
220 return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
223 void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
224 std::string CPUName = CPU;
228 std::string FullFS = FS;
230 // SSE2 should default to enabled in 64-bit mode, but can be turned off
233 FullFS = "+sse2," + FullFS;
237 // If no CPU was specified, enable 64bit feature to satisy later check.
238 if (CPUName == "generic") {
240 FullFS = "+64bit," + FullFS;
246 // LAHF/SAHF are always supported in non-64-bit mode.
249 FullFS = "+sahf," + FullFS;
254 // Parse features string and set the CPU.
255 ParseSubtargetFeatures(CPUName, FullFS);
257 // All CPUs that implement SSE4.2 or SSE4A support unaligned accesses of
258 // 16-bytes and under that are reasonably fast. These features were
259 // introduced with Intel's Nehalem/Silvermont and AMD's Family10h
260 // micro-architectures respectively.
261 if (hasSSE42() || hasSSE4A())
262 IsUAMem16Slow = false;
264 // It's important to keep the MCSubtargetInfo feature bits in sync with
265 // target data structure which is shared with MC code emitter, etc.
267 ToggleFeature(X86::Mode64Bit);
268 else if (In32BitMode)
269 ToggleFeature(X86::Mode32Bit);
270 else if (In16BitMode)
271 ToggleFeature(X86::Mode16Bit);
273 llvm_unreachable("Not 16-bit, 32-bit or 64-bit mode!");
275 LLVM_DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
276 << ", 3DNowLevel " << X863DNowLevel << ", 64bit "
277 << HasX86_64 << "\n");
278 if (In64BitMode && !HasX86_64)
279 report_fatal_error("64-bit code requested on a subtarget that doesn't "
282 // Stack alignment is 16 bytes on Darwin, Linux, kFreeBSD and Solaris (both
283 // 32 and 64 bit) and for all 64-bit targets.
284 if (StackAlignOverride)
285 stackAlignment = StackAlignOverride;
286 else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
287 isTargetKFreeBSD() || In64BitMode)
290 // Some CPUs have more overhead for gather. The specified overhead is relative
291 // to the Load operation. "2" is the number provided by Intel architects. This
292 // parameter is used for cost estimation of Gather Op and comparison with
293 // other alternatives.
294 // TODO: Remove the explicit hasAVX512()?, That would mean we would only
295 // enable gather with a -march.
296 if (hasAVX512() || (hasAVX2() && hasFastGather()))
301 // Consume the vector width attribute or apply any target specific limit.
302 if (PreferVectorWidthOverride)
303 PreferVectorWidth = PreferVectorWidthOverride;
304 else if (Prefer256Bit)
305 PreferVectorWidth = 256;
308 X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
310 initSubtargetFeatures(CPU, FS);
314 X86Subtarget::X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
315 const X86TargetMachine &TM,
316 unsigned StackAlignOverride,
317 unsigned PreferVectorWidthOverride,
318 unsigned RequiredVectorWidth)
319 : X86GenSubtargetInfo(TT, CPU, FS),
320 PICStyle(PICStyles::None), TM(TM), TargetTriple(TT),
321 StackAlignOverride(StackAlignOverride),
322 PreferVectorWidthOverride(PreferVectorWidthOverride),
323 RequiredVectorWidth(RequiredVectorWidth),
324 In64BitMode(TargetTriple.getArch() == Triple::x86_64),
325 In32BitMode(TargetTriple.getArch() == Triple::x86 &&
326 TargetTriple.getEnvironment() != Triple::CODE16),
327 In16BitMode(TargetTriple.getArch() == Triple::x86 &&
328 TargetTriple.getEnvironment() == Triple::CODE16),
329 InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
330 FrameLowering(*this, getStackAlignment()) {
331 // Determine the PICStyle based on the target selected.
332 if (!isPositionIndependent())
333 setPICStyle(PICStyles::None);
335 setPICStyle(PICStyles::RIPRel);
336 else if (isTargetCOFF())
337 setPICStyle(PICStyles::None);
338 else if (isTargetDarwin())
339 setPICStyle(PICStyles::StubPIC);
340 else if (isTargetELF())
341 setPICStyle(PICStyles::GOT);
343 CallLoweringInfo.reset(new X86CallLowering(*getTargetLowering()));
344 Legalizer.reset(new X86LegalizerInfo(*this, TM));
346 auto *RBI = new X86RegisterBankInfo(*getRegisterInfo());
347 RegBankInfo.reset(RBI);
348 InstSelector.reset(createX86InstructionSelector(TM, *this, *RBI));
351 const CallLowering *X86Subtarget::getCallLowering() const {
352 return CallLoweringInfo.get();
355 const InstructionSelector *X86Subtarget::getInstructionSelector() const {
356 return InstSelector.get();
359 const LegalizerInfo *X86Subtarget::getLegalizerInfo() const {
360 return Legalizer.get();
363 const RegisterBankInfo *X86Subtarget::getRegBankInfo() const {
364 return RegBankInfo.get();
367 bool X86Subtarget::enableEarlyIfConversion() const {
368 return hasCMov() && X86EarlyIfConv;