1 //===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements the TargetLoweringBase class.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/ADT/BitVector.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringExtras.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/ADT/Twine.h"
21 #include "llvm/CodeGen/Analysis.h"
22 #include "llvm/CodeGen/ISDOpcodes.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/MachineValueType.h"
32 #include "llvm/CodeGen/RuntimeLibcalls.h"
33 #include "llvm/CodeGen/StackMaps.h"
34 #include "llvm/CodeGen/TargetLowering.h"
35 #include "llvm/CodeGen/TargetOpcodes.h"
36 #include "llvm/CodeGen/TargetRegisterInfo.h"
37 #include "llvm/CodeGen/ValueTypes.h"
38 #include "llvm/IR/Attributes.h"
39 #include "llvm/IR/CallingConv.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/GlobalValue.h"
44 #include "llvm/IR/GlobalVariable.h"
45 #include "llvm/IR/IRBuilder.h"
46 #include "llvm/IR/Module.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/Support/BranchProbability.h"
49 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/CommandLine.h"
51 #include "llvm/Support/Compiler.h"
52 #include "llvm/Support/ErrorHandling.h"
53 #include "llvm/Support/MathExtras.h"
54 #include "llvm/Target/TargetMachine.h"
67 static cl::opt<bool> JumpIsExpensiveOverride(
68 "jump-is-expensive", cl::init(false),
69 cl::desc("Do not create extra branches to split comparison logic."),
72 static cl::opt<unsigned> MinimumJumpTableEntries
73 ("min-jump-table-entries", cl::init(4), cl::Hidden,
74 cl::desc("Set minimum number of entries to use a jump table."));
76 static cl::opt<unsigned> MaximumJumpTableSize
77 ("max-jump-table-size", cl::init(0), cl::Hidden,
78 cl::desc("Set maximum size of jump tables; zero for no limit."));
80 /// Minimum jump table density for normal functions.
81 static cl::opt<unsigned>
82 JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
83 cl::desc("Minimum density for building a jump table in "
84 "a normal function"));
86 /// Minimum jump table density for -Os or -Oz functions.
87 static cl::opt<unsigned> OptsizeJumpTableDensity(
88 "optsize-jump-table-density", cl::init(40), cl::Hidden,
89 cl::desc("Minimum density for building a jump table in "
90 "an optsize function"));
92 static bool darwinHasSinCos(const Triple &TT) {
93 assert(TT.isOSDarwin() && "should be called with darwin triple");
94 // Don't bother with 32 bit x86.
95 if (TT.getArch() == Triple::x86)
97 // Macos < 10.9 has no sincos_stret.
99 return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
100 // iOS < 7.0 has no sincos_stret.
102 return !TT.isOSVersionLT(7, 0);
103 // Any other darwin such as WatchOS/TvOS is new enough.
107 // Although this default value is arbitrary, it is not random. It is assumed
108 // that a condition that evaluates the same way by a higher percentage than this
109 // is best represented as control flow. Therefore, the default value N should be
110 // set such that the win from N% correct executions is greater than the loss
111 // from (100 - N)% mispredicted executions for the majority of intended targets.
112 static cl::opt<int> MinPercentageForPredictableBranch(
113 "min-predictable-branch", cl::init(99),
114 cl::desc("Minimum percentage (0-100) that a condition must be either true "
115 "or false to assume that the condition is predictable"),
118 void TargetLoweringBase::InitLibcalls(const Triple &TT) {
119 #define HANDLE_LIBCALL(code, name) \
120 setLibcallName(RTLIB::code, name);
121 #include "llvm/CodeGen/RuntimeLibcalls.def"
122 #undef HANDLE_LIBCALL
123 // Initialize calling conventions to their default.
124 for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
125 setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);
127 // A few names are different on particular architectures or environments.
128 if (TT.isOSDarwin()) {
129 // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
130 // of the gnueabi-style __gnu_*_ieee.
131 // FIXME: What about other targets?
132 setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
133 setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
135 // Darwin 10 and higher has an optimized __bzero.
136 if (!TT.isMacOSX() || !TT.isMacOSXVersionLT(10, 6) || TT.isArch64Bit()) {
137 setLibcallName(RTLIB::BZERO, TT.isAArch64() ? "bzero" : "__bzero");
140 if (darwinHasSinCos(TT)) {
141 setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
142 setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
143 if (TT.isWatchABI()) {
144 setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
145 CallingConv::ARM_AAPCS_VFP);
146 setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
147 CallingConv::ARM_AAPCS_VFP);
151 setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
152 setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
155 if (TT.isGNUEnvironment() || TT.isOSFuchsia()) {
156 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
157 setLibcallName(RTLIB::SINCOS_F64, "sincos");
158 setLibcallName(RTLIB::SINCOS_F80, "sincosl");
159 setLibcallName(RTLIB::SINCOS_F128, "sincosl");
160 setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
163 if (TT.isOSOpenBSD()) {
164 setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
168 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
169 /// UNKNOWN_LIBCALL if there is none.
170 RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
171 if (OpVT == MVT::f16) {
172 if (RetVT == MVT::f32)
173 return FPEXT_F16_F32;
174 } else if (OpVT == MVT::f32) {
175 if (RetVT == MVT::f64)
176 return FPEXT_F32_F64;
177 if (RetVT == MVT::f128)
178 return FPEXT_F32_F128;
179 if (RetVT == MVT::ppcf128)
180 return FPEXT_F32_PPCF128;
181 } else if (OpVT == MVT::f64) {
182 if (RetVT == MVT::f128)
183 return FPEXT_F64_F128;
184 else if (RetVT == MVT::ppcf128)
185 return FPEXT_F64_PPCF128;
188 return UNKNOWN_LIBCALL;
191 /// getFPROUND - Return the FPROUND_*_* value for the given types, or
192 /// UNKNOWN_LIBCALL if there is none.
193 RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
194 if (RetVT == MVT::f16) {
195 if (OpVT == MVT::f32)
196 return FPROUND_F32_F16;
197 if (OpVT == MVT::f64)
198 return FPROUND_F64_F16;
199 if (OpVT == MVT::f80)
200 return FPROUND_F80_F16;
201 if (OpVT == MVT::f128)
202 return FPROUND_F128_F16;
203 if (OpVT == MVT::ppcf128)
204 return FPROUND_PPCF128_F16;
205 } else if (RetVT == MVT::f32) {
206 if (OpVT == MVT::f64)
207 return FPROUND_F64_F32;
208 if (OpVT == MVT::f80)
209 return FPROUND_F80_F32;
210 if (OpVT == MVT::f128)
211 return FPROUND_F128_F32;
212 if (OpVT == MVT::ppcf128)
213 return FPROUND_PPCF128_F32;
214 } else if (RetVT == MVT::f64) {
215 if (OpVT == MVT::f80)
216 return FPROUND_F80_F64;
217 if (OpVT == MVT::f128)
218 return FPROUND_F128_F64;
219 if (OpVT == MVT::ppcf128)
220 return FPROUND_PPCF128_F64;
223 return UNKNOWN_LIBCALL;
226 /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
227 /// UNKNOWN_LIBCALL if there is none.
228 RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
229 if (OpVT == MVT::f32) {
230 if (RetVT == MVT::i32)
231 return FPTOSINT_F32_I32;
232 if (RetVT == MVT::i64)
233 return FPTOSINT_F32_I64;
234 if (RetVT == MVT::i128)
235 return FPTOSINT_F32_I128;
236 } else if (OpVT == MVT::f64) {
237 if (RetVT == MVT::i32)
238 return FPTOSINT_F64_I32;
239 if (RetVT == MVT::i64)
240 return FPTOSINT_F64_I64;
241 if (RetVT == MVT::i128)
242 return FPTOSINT_F64_I128;
243 } else if (OpVT == MVT::f80) {
244 if (RetVT == MVT::i32)
245 return FPTOSINT_F80_I32;
246 if (RetVT == MVT::i64)
247 return FPTOSINT_F80_I64;
248 if (RetVT == MVT::i128)
249 return FPTOSINT_F80_I128;
250 } else if (OpVT == MVT::f128) {
251 if (RetVT == MVT::i32)
252 return FPTOSINT_F128_I32;
253 if (RetVT == MVT::i64)
254 return FPTOSINT_F128_I64;
255 if (RetVT == MVT::i128)
256 return FPTOSINT_F128_I128;
257 } else if (OpVT == MVT::ppcf128) {
258 if (RetVT == MVT::i32)
259 return FPTOSINT_PPCF128_I32;
260 if (RetVT == MVT::i64)
261 return FPTOSINT_PPCF128_I64;
262 if (RetVT == MVT::i128)
263 return FPTOSINT_PPCF128_I128;
265 return UNKNOWN_LIBCALL;
268 /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
269 /// UNKNOWN_LIBCALL if there is none.
270 RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
271 if (OpVT == MVT::f32) {
272 if (RetVT == MVT::i32)
273 return FPTOUINT_F32_I32;
274 if (RetVT == MVT::i64)
275 return FPTOUINT_F32_I64;
276 if (RetVT == MVT::i128)
277 return FPTOUINT_F32_I128;
278 } else if (OpVT == MVT::f64) {
279 if (RetVT == MVT::i32)
280 return FPTOUINT_F64_I32;
281 if (RetVT == MVT::i64)
282 return FPTOUINT_F64_I64;
283 if (RetVT == MVT::i128)
284 return FPTOUINT_F64_I128;
285 } else if (OpVT == MVT::f80) {
286 if (RetVT == MVT::i32)
287 return FPTOUINT_F80_I32;
288 if (RetVT == MVT::i64)
289 return FPTOUINT_F80_I64;
290 if (RetVT == MVT::i128)
291 return FPTOUINT_F80_I128;
292 } else if (OpVT == MVT::f128) {
293 if (RetVT == MVT::i32)
294 return FPTOUINT_F128_I32;
295 if (RetVT == MVT::i64)
296 return FPTOUINT_F128_I64;
297 if (RetVT == MVT::i128)
298 return FPTOUINT_F128_I128;
299 } else if (OpVT == MVT::ppcf128) {
300 if (RetVT == MVT::i32)
301 return FPTOUINT_PPCF128_I32;
302 if (RetVT == MVT::i64)
303 return FPTOUINT_PPCF128_I64;
304 if (RetVT == MVT::i128)
305 return FPTOUINT_PPCF128_I128;
307 return UNKNOWN_LIBCALL;
310 /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
311 /// UNKNOWN_LIBCALL if there is none.
312 RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
313 if (OpVT == MVT::i32) {
314 if (RetVT == MVT::f32)
315 return SINTTOFP_I32_F32;
316 if (RetVT == MVT::f64)
317 return SINTTOFP_I32_F64;
318 if (RetVT == MVT::f80)
319 return SINTTOFP_I32_F80;
320 if (RetVT == MVT::f128)
321 return SINTTOFP_I32_F128;
322 if (RetVT == MVT::ppcf128)
323 return SINTTOFP_I32_PPCF128;
324 } else if (OpVT == MVT::i64) {
325 if (RetVT == MVT::f32)
326 return SINTTOFP_I64_F32;
327 if (RetVT == MVT::f64)
328 return SINTTOFP_I64_F64;
329 if (RetVT == MVT::f80)
330 return SINTTOFP_I64_F80;
331 if (RetVT == MVT::f128)
332 return SINTTOFP_I64_F128;
333 if (RetVT == MVT::ppcf128)
334 return SINTTOFP_I64_PPCF128;
335 } else if (OpVT == MVT::i128) {
336 if (RetVT == MVT::f32)
337 return SINTTOFP_I128_F32;
338 if (RetVT == MVT::f64)
339 return SINTTOFP_I128_F64;
340 if (RetVT == MVT::f80)
341 return SINTTOFP_I128_F80;
342 if (RetVT == MVT::f128)
343 return SINTTOFP_I128_F128;
344 if (RetVT == MVT::ppcf128)
345 return SINTTOFP_I128_PPCF128;
347 return UNKNOWN_LIBCALL;
350 /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
351 /// UNKNOWN_LIBCALL if there is none.
352 RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
353 if (OpVT == MVT::i32) {
354 if (RetVT == MVT::f32)
355 return UINTTOFP_I32_F32;
356 if (RetVT == MVT::f64)
357 return UINTTOFP_I32_F64;
358 if (RetVT == MVT::f80)
359 return UINTTOFP_I32_F80;
360 if (RetVT == MVT::f128)
361 return UINTTOFP_I32_F128;
362 if (RetVT == MVT::ppcf128)
363 return UINTTOFP_I32_PPCF128;
364 } else if (OpVT == MVT::i64) {
365 if (RetVT == MVT::f32)
366 return UINTTOFP_I64_F32;
367 if (RetVT == MVT::f64)
368 return UINTTOFP_I64_F64;
369 if (RetVT == MVT::f80)
370 return UINTTOFP_I64_F80;
371 if (RetVT == MVT::f128)
372 return UINTTOFP_I64_F128;
373 if (RetVT == MVT::ppcf128)
374 return UINTTOFP_I64_PPCF128;
375 } else if (OpVT == MVT::i128) {
376 if (RetVT == MVT::f32)
377 return UINTTOFP_I128_F32;
378 if (RetVT == MVT::f64)
379 return UINTTOFP_I128_F64;
380 if (RetVT == MVT::f80)
381 return UINTTOFP_I128_F80;
382 if (RetVT == MVT::f128)
383 return UINTTOFP_I128_F128;
384 if (RetVT == MVT::ppcf128)
385 return UINTTOFP_I128_PPCF128;
387 return UNKNOWN_LIBCALL;
390 RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
391 #define OP_TO_LIBCALL(Name, Enum) \
393 switch (VT.SimpleTy) { \
395 return UNKNOWN_LIBCALL; \
409 OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
410 OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
411 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
412 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
413 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
414 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
415 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
416 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
417 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
418 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
419 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
420 OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
425 return UNKNOWN_LIBCALL;
428 RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
429 switch (ElementSize) {
431 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
433 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
435 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
437 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
439 return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
441 return UNKNOWN_LIBCALL;
445 RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
446 switch (ElementSize) {
448 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
450 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
452 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
454 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
456 return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
458 return UNKNOWN_LIBCALL;
462 RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
463 switch (ElementSize) {
465 return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
467 return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
469 return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
471 return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
473 return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
475 return UNKNOWN_LIBCALL;
479 /// InitCmpLibcallCCs - Set default comparison libcall CC.
480 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
481 memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
482 CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
483 CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
484 CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
485 CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
486 CCs[RTLIB::UNE_F32] = ISD::SETNE;
487 CCs[RTLIB::UNE_F64] = ISD::SETNE;
488 CCs[RTLIB::UNE_F128] = ISD::SETNE;
489 CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
490 CCs[RTLIB::OGE_F32] = ISD::SETGE;
491 CCs[RTLIB::OGE_F64] = ISD::SETGE;
492 CCs[RTLIB::OGE_F128] = ISD::SETGE;
493 CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
494 CCs[RTLIB::OLT_F32] = ISD::SETLT;
495 CCs[RTLIB::OLT_F64] = ISD::SETLT;
496 CCs[RTLIB::OLT_F128] = ISD::SETLT;
497 CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
498 CCs[RTLIB::OLE_F32] = ISD::SETLE;
499 CCs[RTLIB::OLE_F64] = ISD::SETLE;
500 CCs[RTLIB::OLE_F128] = ISD::SETLE;
501 CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
502 CCs[RTLIB::OGT_F32] = ISD::SETGT;
503 CCs[RTLIB::OGT_F64] = ISD::SETGT;
504 CCs[RTLIB::OGT_F128] = ISD::SETGT;
505 CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
506 CCs[RTLIB::UO_F32] = ISD::SETNE;
507 CCs[RTLIB::UO_F64] = ISD::SETNE;
508 CCs[RTLIB::UO_F128] = ISD::SETNE;
509 CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
510 CCs[RTLIB::O_F32] = ISD::SETEQ;
511 CCs[RTLIB::O_F64] = ISD::SETEQ;
512 CCs[RTLIB::O_F128] = ISD::SETEQ;
513 CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
516 /// NOTE: The TargetMachine owns TLOF.
517 TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
520 // Perform these initializations only once.
521 MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
522 MaxLoadsPerMemcmp = 8;
523 MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
524 MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
525 UseUnderscoreSetJmp = false;
526 UseUnderscoreLongJmp = false;
527 HasMultipleConditionRegisters = false;
528 HasExtractBitsInsn = false;
529 JumpIsExpensive = JumpIsExpensiveOverride;
530 PredictableSelectIsExpensive = false;
531 EnableExtLdPromotion = false;
532 HasFloatingPointExceptions = true;
533 StackPointerRegisterToSaveRestore = 0;
534 BooleanContents = UndefinedBooleanContent;
535 BooleanFloatContents = UndefinedBooleanContent;
536 BooleanVectorContents = UndefinedBooleanContent;
537 SchedPreferenceInfo = Sched::ILP;
539 JumpBufAlignment = 0;
540 MinFunctionAlignment = 0;
541 PrefFunctionAlignment = 0;
542 PrefLoopAlignment = 0;
543 GatherAllAliasesMaxDepth = 18;
544 MinStackArgumentAlignment = 1;
545 // TODO: the default will be switched to 0 in the next commit, along
546 // with the Target-specific changes necessary.
547 MaxAtomicSizeInBitsSupported = 1024;
549 MinCmpXchgSizeInBits = 0;
550 SupportsUnalignedAtomics = false;
552 std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
554 InitLibcalls(TM.getTargetTriple());
555 InitCmpLibcallCCs(CmpLibcallCCs);
558 void TargetLoweringBase::initActions() {
559 // All operations default to being supported.
560 memset(OpActions, 0, sizeof(OpActions));
561 memset(LoadExtActions, 0, sizeof(LoadExtActions));
562 memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
563 memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
564 memset(CondCodeActions, 0, sizeof(CondCodeActions));
565 std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
566 std::fill(std::begin(TargetDAGCombineArray),
567 std::end(TargetDAGCombineArray), 0);
569 // Set default actions for various operations.
570 for (MVT VT : MVT::all_valuetypes()) {
571 // Default all indexed load / store to expand.
572 for (unsigned IM = (unsigned)ISD::PRE_INC;
573 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
574 setIndexedLoadAction(IM, VT, Expand);
575 setIndexedStoreAction(IM, VT, Expand);
578 // Most backends expect to see the node which just returns the value loaded.
579 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
581 // These operations default to expand.
582 setOperationAction(ISD::FGETSIGN, VT, Expand);
583 setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
584 setOperationAction(ISD::FMINNUM, VT, Expand);
585 setOperationAction(ISD::FMAXNUM, VT, Expand);
586 setOperationAction(ISD::FMINNAN, VT, Expand);
587 setOperationAction(ISD::FMAXNAN, VT, Expand);
588 setOperationAction(ISD::FMAD, VT, Expand);
589 setOperationAction(ISD::SMIN, VT, Expand);
590 setOperationAction(ISD::SMAX, VT, Expand);
591 setOperationAction(ISD::UMIN, VT, Expand);
592 setOperationAction(ISD::UMAX, VT, Expand);
593 setOperationAction(ISD::ABS, VT, Expand);
595 // Overflow operations default to expand
596 setOperationAction(ISD::SADDO, VT, Expand);
597 setOperationAction(ISD::SSUBO, VT, Expand);
598 setOperationAction(ISD::UADDO, VT, Expand);
599 setOperationAction(ISD::USUBO, VT, Expand);
600 setOperationAction(ISD::SMULO, VT, Expand);
601 setOperationAction(ISD::UMULO, VT, Expand);
603 // ADDCARRY operations default to expand
604 setOperationAction(ISD::ADDCARRY, VT, Expand);
605 setOperationAction(ISD::SUBCARRY, VT, Expand);
606 setOperationAction(ISD::SETCCCARRY, VT, Expand);
608 // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
609 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
610 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
612 setOperationAction(ISD::BITREVERSE, VT, Expand);
614 // These library functions default to expand.
615 setOperationAction(ISD::FROUND, VT, Expand);
616 setOperationAction(ISD::FPOWI, VT, Expand);
618 // These operations default to expand for vector types.
620 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
621 setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
622 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
623 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
626 // For most targets @llvm.get.dynamic.area.offset just returns 0.
627 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
630 // Most targets ignore the @llvm.prefetch intrinsic.
631 setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
633 // Most targets also ignore the @llvm.readcyclecounter intrinsic.
634 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);
636 // ConstantFP nodes default to expand. Targets can either change this to
637 // Legal, in which case all fp constants are legal, or use isFPImmLegal()
638 // to optimize expansions for certain constants.
639 setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
640 setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
641 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
642 setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
643 setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
645 // These library functions default to expand.
646 for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
647 setOperationAction(ISD::FLOG , VT, Expand);
648 setOperationAction(ISD::FLOG2, VT, Expand);
649 setOperationAction(ISD::FLOG10, VT, Expand);
650 setOperationAction(ISD::FEXP , VT, Expand);
651 setOperationAction(ISD::FEXP2, VT, Expand);
652 setOperationAction(ISD::FFLOOR, VT, Expand);
653 setOperationAction(ISD::FNEARBYINT, VT, Expand);
654 setOperationAction(ISD::FCEIL, VT, Expand);
655 setOperationAction(ISD::FRINT, VT, Expand);
656 setOperationAction(ISD::FTRUNC, VT, Expand);
657 setOperationAction(ISD::FROUND, VT, Expand);
660 // Default ISD::TRAP to expand (which turns it into abort).
661 setOperationAction(ISD::TRAP, MVT::Other, Expand);
663 // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
664 // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
665 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
668 MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
670 return MVT::getIntegerVT(8 * DL.getPointerSize(0));
673 EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
674 const DataLayout &DL) const {
675 assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
676 if (LHSTy.isVector())
678 return getScalarShiftAmountTy(DL, LHSTy);
681 bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
682 assert(isTypeLegal(VT));
694 void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
695 // If the command-line option was specified, ignore this request.
696 if (!JumpIsExpensiveOverride.getNumOccurrences())
697 JumpIsExpensive = isExpensive;
700 TargetLoweringBase::LegalizeKind
701 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
702 // If this is a simple type, use the ComputeRegisterProp mechanism.
704 MVT SVT = VT.getSimpleVT();
705 assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
706 MVT NVT = TransformToType[SVT.SimpleTy];
707 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
709 assert((LA == TypeLegal || LA == TypeSoftenFloat ||
710 ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
711 "Promote may not follow Expand or Promote");
713 if (LA == TypeSplitVector)
714 return LegalizeKind(LA,
715 EVT::getVectorVT(Context, SVT.getVectorElementType(),
716 SVT.getVectorNumElements() / 2));
717 if (LA == TypeScalarizeVector)
718 return LegalizeKind(LA, SVT.getVectorElementType());
719 return LegalizeKind(LA, NVT);
722 // Handle Extended Scalar Types.
723 if (!VT.isVector()) {
724 assert(VT.isInteger() && "Float types must be simple");
725 unsigned BitSize = VT.getSizeInBits();
726 // First promote to a power-of-two size, then expand if necessary.
727 if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
728 EVT NVT = VT.getRoundIntegerType(Context);
729 assert(NVT != VT && "Unable to round integer VT");
730 LegalizeKind NextStep = getTypeConversion(Context, NVT);
731 // Avoid multi-step promotion.
732 if (NextStep.first == TypePromoteInteger)
734 // Return rounded integer type.
735 return LegalizeKind(TypePromoteInteger, NVT);
738 return LegalizeKind(TypeExpandInteger,
739 EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
742 // Handle vector types.
743 unsigned NumElts = VT.getVectorNumElements();
744 EVT EltVT = VT.getVectorElementType();
746 // Vectors with only one element are always scalarized.
748 return LegalizeKind(TypeScalarizeVector, EltVT);
750 // Try to widen vector elements until the element type is a power of two and
751 // promote it to a legal type later on, for example:
752 // <3 x i8> -> <4 x i8> -> <4 x i32>
753 if (EltVT.isInteger()) {
754 // Vectors with a number of elements that is not a power of two are always
755 // widened, for example <3 x i8> -> <4 x i8>.
756 if (!VT.isPow2VectorType()) {
757 NumElts = (unsigned)NextPowerOf2(NumElts);
758 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
759 return LegalizeKind(TypeWidenVector, NVT);
762 // Examine the element type.
763 LegalizeKind LK = getTypeConversion(Context, EltVT);
765 // If type is to be expanded, split the vector.
766 // <4 x i140> -> <2 x i140>
767 if (LK.first == TypeExpandInteger)
768 return LegalizeKind(TypeSplitVector,
769 EVT::getVectorVT(Context, EltVT, NumElts / 2));
771 // Promote the integer element types until a legal vector type is found
772 // or until the element integer type is too big. If a legal type was not
773 // found, fallback to the usual mechanism of widening/splitting the
775 EVT OldEltVT = EltVT;
777 // Increase the bitwidth of the element to the next pow-of-two
778 // (which is greater than 8 bits).
779 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
780 .getRoundIntegerType(Context);
782 // Stop trying when getting a non-simple element type.
783 // Note that vector elements may be greater than legal vector element
784 // types. Example: X86 XMM registers hold 64bit element on 32bit
786 if (!EltVT.isSimple())
789 // Build a new vector type and check if it is legal.
790 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
791 // Found a legal promoted vector type.
792 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
793 return LegalizeKind(TypePromoteInteger,
794 EVT::getVectorVT(Context, EltVT, NumElts));
797 // Reset the type to the unexpanded type if we did not find a legal vector
798 // type with a promoted vector element type.
802 // Try to widen the vector until a legal type is found.
803 // If there is no wider legal type, split the vector.
805 // Round up to the next power of 2.
806 NumElts = (unsigned)NextPowerOf2(NumElts);
808 // If there is no simple vector type with this many elements then there
809 // cannot be a larger legal vector type. Note that this assumes that
810 // there are no skipped intermediate vector types in the simple types.
811 if (!EltVT.isSimple())
813 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
814 if (LargerVector == MVT())
817 // If this type is legal then widen the vector.
818 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
819 return LegalizeKind(TypeWidenVector, LargerVector);
822 // Widen odd vectors to next power of two.
823 if (!VT.isPow2VectorType()) {
824 EVT NVT = VT.getPow2VectorType(Context);
825 return LegalizeKind(TypeWidenVector, NVT);
828 // Vectors with illegal element types are expanded.
829 EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
830 return LegalizeKind(TypeSplitVector, NVT);
833 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
834 unsigned &NumIntermediates,
836 TargetLoweringBase *TLI) {
837 // Figure out the right, legal destination reg to copy into.
838 unsigned NumElts = VT.getVectorNumElements();
839 MVT EltTy = VT.getVectorElementType();
841 unsigned NumVectorRegs = 1;
843 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
844 // could break down into LHS/RHS like LegalizeDAG does.
845 if (!isPowerOf2_32(NumElts)) {
846 NumVectorRegs = NumElts;
850 // Divide the input until we get to a supported size. This will always
851 // end with a scalar if the target doesn't support vectors.
852 while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
857 NumIntermediates = NumVectorRegs;
859 MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
860 if (!TLI->isTypeLegal(NewVT))
862 IntermediateVT = NewVT;
864 unsigned NewVTSize = NewVT.getSizeInBits();
866 // Convert sizes such as i33 to i64.
867 if (!isPowerOf2_32(NewVTSize))
868 NewVTSize = NextPowerOf2(NewVTSize);
870 MVT DestVT = TLI->getRegisterType(NewVT);
872 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
873 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
875 // Otherwise, promotion or legal types use the same number of registers as
876 // the vector decimated to the appropriate level.
877 return NumVectorRegs;
880 /// isLegalRC - Return true if the value types that can be represented by the
881 /// specified register class are all legal.
882 bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
883 const TargetRegisterClass &RC) const {
884 for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
890 /// Replace/modify any TargetFrameIndex operands with a targte-dependent
891 /// sequence of memory operands that is recognized by PrologEpilogInserter.
893 TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
894 MachineBasicBlock *MBB) const {
895 MachineInstr *MI = &InitialMI;
896 MachineFunction &MF = *MI->getMF();
897 MachineFrameInfo &MFI = MF.getFrameInfo();
899 // We're handling multiple types of operands here:
900 // PATCHPOINT MetaArgs - live-in, read only, direct
901 // STATEPOINT Deopt Spill - live-through, read only, indirect
902 // STATEPOINT Deopt Alloca - live-through, read only, direct
903 // (We're currently conservative and mark the deopt slots read/write in
905 // STATEPOINT GC Spill - live-through, read/write, indirect
906 // STATEPOINT GC Alloca - live-through, read/write, direct
907 // The live-in vs live-through is handled already (the live through ones are
908 // all stack slots), but we need to handle the different type of stackmap
909 // operands and memory effects here.
911 // MI changes inside this loop as we grow operands.
912 for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
913 MachineOperand &MO = MI->getOperand(OperIdx);
917 // foldMemoryOperand builds a new MI after replacing a single FI operand
918 // with the canonical set of five x86 addressing-mode operands.
919 int FI = MO.getIndex();
920 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
922 // Copy operands before the frame-index.
923 for (unsigned i = 0; i < OperIdx; ++i)
924 MIB.add(MI->getOperand(i));
925 // Add frame index operands recognized by stackmaps.cpp
926 if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
927 // indirect-mem-ref tag, size, #FI, offset.
928 // Used for spills inserted by StatepointLowering. This codepath is not
929 // used for patchpoints/stackmaps at all, for these spilling is done via
930 // foldMemoryOperand callback only.
931 assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
932 MIB.addImm(StackMaps::IndirectMemRefOp);
933 MIB.addImm(MFI.getObjectSize(FI));
934 MIB.add(MI->getOperand(OperIdx));
937 // direct-mem-ref tag, #FI, offset.
938 // Used by patchpoint, and direct alloca arguments to statepoints
939 MIB.addImm(StackMaps::DirectMemRefOp);
940 MIB.add(MI->getOperand(OperIdx));
943 // Copy the operands after the frame index.
944 for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
945 MIB.add(MI->getOperand(i));
947 // Inherit previous memory operands.
948 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
949 assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
951 // Add a new memory operand for this FI.
952 assert(MFI.getObjectOffset(FI) != -1);
954 auto Flags = MachineMemOperand::MOLoad;
955 if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
956 Flags |= MachineMemOperand::MOStore;
957 Flags |= MachineMemOperand::MOVolatile;
959 MachineMemOperand *MMO = MF.getMachineMemOperand(
960 MachinePointerInfo::getFixedStack(MF, FI), Flags,
961 MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
962 MIB->addMemOperand(MF, MMO);
964 // Replace the instruction and update the operand index.
965 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
966 OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
967 MI->eraseFromParent();
973 /// findRepresentativeClass - Return the largest legal super-reg register class
974 /// of the register class for the specified type and its associated "cost".
975 // This function is in TargetLowering because it uses RegClassForVT which would
976 // need to be moved to TargetRegisterInfo and would necessitate moving
977 // isTypeLegal over as well - a massive change that would just require
978 // TargetLowering having a TargetRegisterInfo class member that it would use.
979 std::pair<const TargetRegisterClass *, uint8_t>
980 TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
982 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
984 return std::make_pair(RC, 0);
986 // Compute the set of all super-register classes.
987 BitVector SuperRegRC(TRI->getNumRegClasses());
988 for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
989 SuperRegRC.setBitsInMask(RCI.getMask());
991 // Find the first legal register class with the largest spill size.
992 const TargetRegisterClass *BestRC = RC;
993 for (unsigned i : SuperRegRC.set_bits()) {
994 const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
995 // We want the largest possible spill size.
996 if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
998 if (!isLegalRC(*TRI, *SuperRC))
1002 return std::make_pair(BestRC, 1);
1005 /// computeRegisterProperties - Once all of the register classes are added,
1006 /// this allows us to compute derived properties we expose.
1007 void TargetLoweringBase::computeRegisterProperties(
1008 const TargetRegisterInfo *TRI) {
1009 static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
1010 "Too many value types for ValueTypeActions to hold!");
1012 // Everything defaults to needing one register.
1013 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1014 NumRegistersForVT[i] = 1;
1015 RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1017 // ...except isVoid, which doesn't need any registers.
1018 NumRegistersForVT[MVT::isVoid] = 0;
1020 // Find the largest integer register class.
1021 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1022 for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1023 assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1025 // Every integer value type larger than this largest register takes twice as
1026 // many registers to represent as the previous ValueType.
1027 for (unsigned ExpandedReg = LargestIntReg + 1;
1028 ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1029 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1030 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1031 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1032 ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1036 // Inspect all of the ValueType's smaller than the largest integer
1037 // register to see which ones need promotion.
1038 unsigned LegalIntReg = LargestIntReg;
1039 for (unsigned IntReg = LargestIntReg - 1;
1040 IntReg >= (unsigned)MVT::i1; --IntReg) {
1041 MVT IVT = (MVT::SimpleValueType)IntReg;
1042 if (isTypeLegal(IVT)) {
1043 LegalIntReg = IntReg;
1045 RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1046 (const MVT::SimpleValueType)LegalIntReg;
1047 ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1051 // ppcf128 type is really two f64's.
1052 if (!isTypeLegal(MVT::ppcf128)) {
1053 if (isTypeLegal(MVT::f64)) {
1054 NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1055 RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1056 TransformToType[MVT::ppcf128] = MVT::f64;
1057 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1059 NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1060 RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1061 TransformToType[MVT::ppcf128] = MVT::i128;
1062 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1066 // Decide how to handle f128. If the target does not have native f128 support,
1067 // expand it to i128 and we will be generating soft float library calls.
1068 if (!isTypeLegal(MVT::f128)) {
1069 NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1070 RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1071 TransformToType[MVT::f128] = MVT::i128;
1072 ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1075 // Decide how to handle f64. If the target does not have native f64 support,
1076 // expand it to i64 and we will be generating soft float library calls.
1077 if (!isTypeLegal(MVT::f64)) {
1078 NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1079 RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1080 TransformToType[MVT::f64] = MVT::i64;
1081 ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1084 // Decide how to handle f32. If the target does not have native f32 support,
1085 // expand it to i32 and we will be generating soft float library calls.
1086 if (!isTypeLegal(MVT::f32)) {
1087 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1088 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1089 TransformToType[MVT::f32] = MVT::i32;
1090 ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1093 // Decide how to handle f16. If the target does not have native f16 support,
1094 // promote it to f32, because there are no f16 library calls (except for
1096 if (!isTypeLegal(MVT::f16)) {
1097 NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1098 RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1099 TransformToType[MVT::f16] = MVT::f32;
1100 ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1103 // Loop over all of the vector value types to see which need transformations.
1104 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
1105 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
1106 MVT VT = (MVT::SimpleValueType) i;
1107 if (isTypeLegal(VT))
1110 MVT EltVT = VT.getVectorElementType();
1111 unsigned NElts = VT.getVectorNumElements();
1112 bool IsLegalWiderType = false;
1113 LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
1114 switch (PreferredAction) {
1115 case TypePromoteInteger:
1116 // Try to promote the elements of integer vectors. If no legal
1117 // promotion was found, fall through to the widen-vector method.
1118 for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
1119 MVT SVT = (MVT::SimpleValueType) nVT;
1120 // Promote vectors of integers to vectors with the same number
1121 // of elements, with a wider element type.
1122 if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
1123 SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
1124 TransformToType[i] = SVT;
1125 RegisterTypeForVT[i] = SVT;
1126 NumRegistersForVT[i] = 1;
1127 ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
1128 IsLegalWiderType = true;
1132 if (IsLegalWiderType)
1136 case TypeWidenVector:
1137 // Try to widen the vector.
1138 for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
1139 MVT SVT = (MVT::SimpleValueType) nVT;
1140 if (SVT.getVectorElementType() == EltVT
1141 && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
1142 TransformToType[i] = SVT;
1143 RegisterTypeForVT[i] = SVT;
1144 NumRegistersForVT[i] = 1;
1145 ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1146 IsLegalWiderType = true;
1150 if (IsLegalWiderType)
1154 case TypeSplitVector:
1155 case TypeScalarizeVector: {
1158 unsigned NumIntermediates;
1159 NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
1160 NumIntermediates, RegisterVT, this);
1161 RegisterTypeForVT[i] = RegisterVT;
1163 MVT NVT = VT.getPow2VectorType();
1165 // Type is already a power of 2. The default action is to split.
1166 TransformToType[i] = MVT::Other;
1167 if (PreferredAction == TypeScalarizeVector)
1168 ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
1169 else if (PreferredAction == TypeSplitVector)
1170 ValueTypeActions.setTypeAction(VT, TypeSplitVector);
1172 // Set type action according to the number of elements.
1173 ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
1176 TransformToType[i] = NVT;
1177 ValueTypeActions.setTypeAction(VT, TypeWidenVector);
1182 llvm_unreachable("Unknown vector legalization action!");
1186 // Determine the 'representative' register class for each value type.
1187 // An representative register class is the largest (meaning one which is
1188 // not a sub-register class / subreg register class) legal register class for
1189 // a group of value types. For example, on i386, i8, i16, and i32
1190 // representative would be GR32; while on x86_64 it's GR64.
1191 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1192 const TargetRegisterClass* RRC;
1194 std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
1195 RepRegClassForVT[i] = RRC;
1196 RepRegClassCostForVT[i] = Cost;
1200 EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1202 assert(!VT.isVector() && "No default SetCC type for vectors!");
1203 return getPointerTy(DL).SimpleTy;
1206 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
1207 return MVT::i32; // return the default value
1210 /// getVectorTypeBreakdown - Vector types are broken down into some number of
1211 /// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1212 /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1213 /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1215 /// This method returns the number of registers needed, and the VT for each
1216 /// register. It also returns the VT and quantity of the intermediate values
1217 /// before they are promoted/expanded.
1218 unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1219 EVT &IntermediateVT,
1220 unsigned &NumIntermediates,
1221 MVT &RegisterVT) const {
1222 unsigned NumElts = VT.getVectorNumElements();
1224 // If there is a wider vector type with the same element type as this one,
1225 // or a promoted vector type that has the same number of elements which
1226 // are wider, then we should convert to that legal vector type.
1227 // This handles things like <2 x float> -> <4 x float> and
1228 // <4 x i1> -> <4 x i32>.
1229 LegalizeTypeAction TA = getTypeAction(Context, VT);
1230 if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
1231 EVT RegisterEVT = getTypeToTransformTo(Context, VT);
1232 if (isTypeLegal(RegisterEVT)) {
1233 IntermediateVT = RegisterEVT;
1234 RegisterVT = RegisterEVT.getSimpleVT();
1235 NumIntermediates = 1;
1240 // Figure out the right, legal destination reg to copy into.
1241 EVT EltTy = VT.getVectorElementType();
1243 unsigned NumVectorRegs = 1;
1245 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
1246 // could break down into LHS/RHS like LegalizeDAG does.
1247 if (!isPowerOf2_32(NumElts)) {
1248 NumVectorRegs = NumElts;
1252 // Divide the input until we get to a supported size. This will always
1253 // end with a scalar if the target doesn't support vectors.
1254 while (NumElts > 1 && !isTypeLegal(
1255 EVT::getVectorVT(Context, EltTy, NumElts))) {
1257 NumVectorRegs <<= 1;
1260 NumIntermediates = NumVectorRegs;
1262 EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
1263 if (!isTypeLegal(NewVT))
1265 IntermediateVT = NewVT;
1267 MVT DestVT = getRegisterType(Context, NewVT);
1268 RegisterVT = DestVT;
1269 unsigned NewVTSize = NewVT.getSizeInBits();
1271 // Convert sizes such as i33 to i64.
1272 if (!isPowerOf2_32(NewVTSize))
1273 NewVTSize = NextPowerOf2(NewVTSize);
1275 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
1276 return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
1278 // Otherwise, promotion or legal types use the same number of registers as
1279 // the vector decimated to the appropriate level.
1280 return NumVectorRegs;
1283 /// Get the EVTs and ArgFlags collections that represent the legalized return
1284 /// type of the given function. This does not require a DAG or a return value,
1285 /// and is suitable for use before any DAGs for the function are constructed.
1286 /// TODO: Move this out of TargetLowering.cpp.
1287 void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
1288 SmallVectorImpl<ISD::OutputArg> &Outs,
1289 const TargetLowering &TLI, const DataLayout &DL) {
1290 SmallVector<EVT, 4> ValueVTs;
1291 ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1292 unsigned NumValues = ValueVTs.size();
1293 if (NumValues == 0) return;
1295 for (unsigned j = 0, f = NumValues; j != f; ++j) {
1296 EVT VT = ValueVTs[j];
1297 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1299 if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
1300 ExtendKind = ISD::SIGN_EXTEND;
1301 else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
1302 ExtendKind = ISD::ZERO_EXTEND;
1304 // FIXME: C calling convention requires the return type to be promoted to
1305 // at least 32-bit. But this is not necessary for non-C calling
1306 // conventions. The frontend should mark functions whose return values
1307 // require promoting with signext or zeroext attributes.
1308 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
1309 MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
1310 if (VT.bitsLT(MinVT))
1315 TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
1317 TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
1319 // 'inreg' on function refers to return value
1320 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1321 if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
1324 // Propagate extension type if any
1325 if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
1327 else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
1330 for (unsigned i = 0; i < NumParts; ++i)
1331 Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
1335 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1336 /// function arguments in the caller parameter area. This is the actual
1337 /// alignment, not its logarithm.
1338 unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1339 const DataLayout &DL) const {
1340 return DL.getABITypeAlignment(Ty);
1343 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1344 const DataLayout &DL, EVT VT,
1348 // Check if the specified alignment is sufficient based on the data layout.
1349 // TODO: While using the data layout works in practice, a better solution
1350 // would be to implement this check directly (make this a virtual function).
1351 // For example, the ABI alignment may change based on software platform while
1352 // this function should only be affected by hardware implementation.
1353 Type *Ty = VT.getTypeForEVT(Context);
1354 if (Alignment >= DL.getABITypeAlignment(Ty)) {
1355 // Assume that an access that meets the ABI-specified alignment is fast.
1356 if (Fast != nullptr)
1361 // This is a misaligned access.
1362 return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
1365 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
1366 return BranchProbability(MinPercentageForPredictableBranch, 100);
1369 //===----------------------------------------------------------------------===//
1370 // TargetTransformInfo Helpers
1371 //===----------------------------------------------------------------------===//
1373 int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1374 enum InstructionOpcodes {
1375 #define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1376 #define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1377 #include "llvm/IR/Instruction.def"
1379 switch (static_cast<InstructionOpcodes>(Opcode)) {
1382 case Switch: return 0;
1383 case IndirectBr: return 0;
1384 case Invoke: return 0;
1385 case Resume: return 0;
1386 case Unreachable: return 0;
1387 case CleanupRet: return 0;
1388 case CatchRet: return 0;
1389 case CatchPad: return 0;
1390 case CatchSwitch: return 0;
1391 case CleanupPad: return 0;
1392 case Add: return ISD::ADD;
1393 case FAdd: return ISD::FADD;
1394 case Sub: return ISD::SUB;
1395 case FSub: return ISD::FSUB;
1396 case Mul: return ISD::MUL;
1397 case FMul: return ISD::FMUL;
1398 case UDiv: return ISD::UDIV;
1399 case SDiv: return ISD::SDIV;
1400 case FDiv: return ISD::FDIV;
1401 case URem: return ISD::UREM;
1402 case SRem: return ISD::SREM;
1403 case FRem: return ISD::FREM;
1404 case Shl: return ISD::SHL;
1405 case LShr: return ISD::SRL;
1406 case AShr: return ISD::SRA;
1407 case And: return ISD::AND;
1408 case Or: return ISD::OR;
1409 case Xor: return ISD::XOR;
1410 case Alloca: return 0;
1411 case Load: return ISD::LOAD;
1412 case Store: return ISD::STORE;
1413 case GetElementPtr: return 0;
1414 case Fence: return 0;
1415 case AtomicCmpXchg: return 0;
1416 case AtomicRMW: return 0;
1417 case Trunc: return ISD::TRUNCATE;
1418 case ZExt: return ISD::ZERO_EXTEND;
1419 case SExt: return ISD::SIGN_EXTEND;
1420 case FPToUI: return ISD::FP_TO_UINT;
1421 case FPToSI: return ISD::FP_TO_SINT;
1422 case UIToFP: return ISD::UINT_TO_FP;
1423 case SIToFP: return ISD::SINT_TO_FP;
1424 case FPTrunc: return ISD::FP_ROUND;
1425 case FPExt: return ISD::FP_EXTEND;
1426 case PtrToInt: return ISD::BITCAST;
1427 case IntToPtr: return ISD::BITCAST;
1428 case BitCast: return ISD::BITCAST;
1429 case AddrSpaceCast: return ISD::ADDRSPACECAST;
1430 case ICmp: return ISD::SETCC;
1431 case FCmp: return ISD::SETCC;
1433 case Call: return 0;
1434 case Select: return ISD::SELECT;
1435 case UserOp1: return 0;
1436 case UserOp2: return 0;
1437 case VAArg: return 0;
1438 case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1439 case InsertElement: return ISD::INSERT_VECTOR_ELT;
1440 case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1441 case ExtractValue: return ISD::MERGE_VALUES;
1442 case InsertValue: return ISD::MERGE_VALUES;
1443 case LandingPad: return 0;
1446 llvm_unreachable("Unknown instruction type encountered!");
1450 TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
1452 LLVMContext &C = Ty->getContext();
1453 EVT MTy = getValueType(DL, Ty);
1456 // We keep legalizing the type until we find a legal kind. We assume that
1457 // the only operation that costs anything is the split. After splitting
1458 // we need to handle two types.
1460 LegalizeKind LK = getTypeConversion(C, MTy);
1462 if (LK.first == TypeLegal)
1463 return std::make_pair(Cost, MTy.getSimpleVT());
1465 if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
1468 // Do not loop with f128 type.
1469 if (MTy == LK.second)
1470 return std::make_pair(Cost, MTy.getSimpleVT());
1472 // Keep legalizing the type.
1477 Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
1478 bool UseTLS) const {
1479 // compiler-rt provides a variable with a magic name. Targets that do not
1480 // link with compiler-rt may also provide such a variable.
1481 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1482 const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1483 auto UnsafeStackPtr =
1484 dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1486 Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1488 if (!UnsafeStackPtr) {
1489 auto TLSModel = UseTLS ?
1490 GlobalValue::InitialExecTLSModel :
1491 GlobalValue::NotThreadLocal;
1492 // The global variable is not defined yet, define it ourselves.
1493 // We use the initial-exec TLS model because we do not support the
1494 // variable living anywhere other than in the main executable.
1495 UnsafeStackPtr = new GlobalVariable(
1496 *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
1497 UnsafeStackPtrVar, nullptr, TLSModel);
1499 // The variable exists, check its type and attributes.
1500 if (UnsafeStackPtr->getValueType() != StackPtrTy)
1501 report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
1502 if (UseTLS != UnsafeStackPtr->isThreadLocal())
1503 report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
1504 (UseTLS ? "" : "not ") + "be thread-local");
1506 return UnsafeStackPtr;
1509 Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
1510 if (!TM.getTargetTriple().isAndroid())
1511 return getDefaultSafeStackPointerLocation(IRB, true);
1513 // Android provides a libc function to retrieve the address of the current
1514 // thread's unsafe stack pointer.
1515 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1516 Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
1517 Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
1518 StackPtrTy->getPointerTo(0));
1519 return IRB.CreateCall(Fn);
1522 //===----------------------------------------------------------------------===//
1523 // Loop Strength Reduction hooks
1524 //===----------------------------------------------------------------------===//
1526 /// isLegalAddressingMode - Return true if the addressing mode represented
1527 /// by AM is legal for this target, for a load/store of the specified type.
1528 bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
1529 const AddrMode &AM, Type *Ty,
1530 unsigned AS, Instruction *I) const {
1531 // The default implementation of this implements a conservative RISCy, r+r and
1534 // Allows a sign-extended 16-bit immediate field.
1535 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
1538 // No global is ever allowed as a base.
1542 // Only support r+r,
1544 case 0: // "r+i" or just "i", depending on HasBaseReg.
1547 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
1549 // Otherwise we have r+r or r+i.
1552 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
1554 // Allow 2*r as r+r.
1556 default: // Don't allow n * r
1563 //===----------------------------------------------------------------------===//
//  Stack Protector
1565 //===----------------------------------------------------------------------===//
1567 // For OpenBSD return its special guard variable. Otherwise return nullptr,
1568 // so that SelectionDAG handle SSP.
1569 Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
1570 if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
1571 Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
1572 PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
1573 return M.getOrInsertGlobal("__guard_local", PtrTy);
1578 // Currently only support "standard" __stack_chk_guard.
1579 // TODO: add LOAD_STACK_GUARD support.
1580 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
1581 M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext()));
1584 // Currently only support "standard" __stack_chk_guard.
1585 // TODO: add LOAD_STACK_GUARD support.
1586 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
1587 return M.getGlobalVariable("__stack_chk_guard", true);
1590 Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
1594 unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
1595 return MinimumJumpTableEntries;
1598 void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
1599 MinimumJumpTableEntries = Val;
1602 unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
1603 return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
1606 unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
1607 return MaximumJumpTableSize;
1610 void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
1611 MaximumJumpTableSize = Val;
1614 //===----------------------------------------------------------------------===//
1615 // Reciprocal Estimates
1616 //===----------------------------------------------------------------------===//
1618 /// Get the reciprocal estimate attribute string for a function that will
1619 /// override the target defaults.
1620 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
1621 const Function &F = MF.getFunction();
1622 return F.getFnAttribute("reciprocal-estimates").getValueAsString();
1625 /// Construct a string for the given reciprocal operation of the given type.
1626 /// This string should match the corresponding option to the front-end's
1627 /// "-mrecip" flag assuming those strings have been passed through in an
1628 /// attribute string. For example, "vec-divf" for a division of a vXf32.
1629 static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
1630 std::string Name = VT.isVector() ? "vec-" : "";
1632 Name += IsSqrt ? "sqrt" : "div";
1634 // TODO: Handle "half" or other float types?
1635 if (VT.getScalarType() == MVT::f64) {
1638 assert(VT.getScalarType() == MVT::f32 &&
1639 "Unexpected FP type for reciprocal estimate");
1646 /// Return the character position and value (a single numeric character) of a
1647 /// customized refinement operation in the input string if it exists. Return
1648 /// false if there is no customized refinement step count.
1649 static bool parseRefinementStep(StringRef In, size_t &Position,
1651 const char RefStepToken = ':';
1652 Position = In.find(RefStepToken);
1653 if (Position == StringRef::npos)
1656 StringRef RefStepString = In.substr(Position + 1);
1657 // Allow exactly one numeric character for the additional refinement
1659 if (RefStepString.size() == 1) {
1660 char RefStepChar = RefStepString[0];
1661 if (RefStepChar >= '0' && RefStepChar <= '9') {
1662 Value = RefStepChar - '0';
1666 report_fatal_error("Invalid refinement step for -recip.");
1669 /// For the input attribute string, return one of the ReciprocalEstimate enum
1670 /// status values (enabled, disabled, or not specified) for this operation on
1671 /// the specified data type.
1672 static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
1673 if (Override.empty())
1674 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1676 SmallVector<StringRef, 4> OverrideVector;
1677 SplitString(Override, OverrideVector, ",");
1678 unsigned NumArgs = OverrideVector.size();
1680 // Check if "all", "none", or "default" was specified.
1682 // Look for an optional setting of the number of refinement steps needed
1683 // for this type of reciprocal operation.
1686 if (parseRefinementStep(Override, RefPos, RefSteps)) {
1687 // Split the string for further processing.
1688 Override = Override.substr(0, RefPos);
1691 // All reciprocal types are enabled.
1692 if (Override == "all")
1693 return TargetLoweringBase::ReciprocalEstimate::Enabled;
1695 // All reciprocal types are disabled.
1696 if (Override == "none")
1697 return TargetLoweringBase::ReciprocalEstimate::Disabled;
1699 // Target defaults for enablement are used.
1700 if (Override == "default")
1701 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1704 // The attribute string may omit the size suffix ('f'/'d').
1705 std::string VTName = getReciprocalOpName(IsSqrt, VT);
1706 std::string VTNameNoSize = VTName;
1707 VTNameNoSize.pop_back();
1708 static const char DisabledPrefix = '!';
1710 for (StringRef RecipType : OverrideVector) {
1713 if (parseRefinementStep(RecipType, RefPos, RefSteps))
1714 RecipType = RecipType.substr(0, RefPos);
1716 // Ignore the disablement token for string matching.
1717 bool IsDisabled = RecipType[0] == DisabledPrefix;
1719 RecipType = RecipType.substr(1);
1721 if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
1722 return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
1723 : TargetLoweringBase::ReciprocalEstimate::Enabled;
1726 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1729 /// For the input attribute string, return the customized refinement step count
1730 /// for this operation on the specified data type. If the step count does not
1731 /// exist, return the ReciprocalEstimate enum value for unspecified.
1732 static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
1733 if (Override.empty())
1734 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1736 SmallVector<StringRef, 4> OverrideVector;
1737 SplitString(Override, OverrideVector, ",");
1738 unsigned NumArgs = OverrideVector.size();
1740 // Check if "all", "default", or "none" was specified.
1742 // Look for an optional setting of the number of refinement steps needed
1743 // for this type of reciprocal operation.
1746 if (!parseRefinementStep(Override, RefPos, RefSteps))
1747 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1749 // Split the string for further processing.
1750 Override = Override.substr(0, RefPos);
1751 assert(Override != "none" &&
1752 "Disabled reciprocals, but specifed refinement steps?");
1754 // If this is a general override, return the specified number of steps.
1755 if (Override == "all" || Override == "default")
1759 // The attribute string may omit the size suffix ('f'/'d').
1760 std::string VTName = getReciprocalOpName(IsSqrt, VT);
1761 std::string VTNameNoSize = VTName;
1762 VTNameNoSize.pop_back();
1764 for (StringRef RecipType : OverrideVector) {
1767 if (!parseRefinementStep(RecipType, RefPos, RefSteps))
1770 RecipType = RecipType.substr(0, RefPos);
1771 if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
1775 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
1778 int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
1779 MachineFunction &MF) const {
1780 return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
1783 int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
1784 MachineFunction &MF) const {
1785 return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
1788 int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
1789 MachineFunction &MF) const {
1790 return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
1793 int TargetLoweringBase::getDivRefinementSteps(EVT VT,
1794 MachineFunction &MF) const {
1795 return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
1798 void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
1799 MF.getRegInfo().freezeReservedRegs(MF);