//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(0), cl::Hidden,
   cl::desc("Set maximum size of jump tables; zero for no limit."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

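/// Return true if this Darwin target is new enough to provide the
/// __sincos_stret libcalls (sin and cos computed together); the minimum OS
/// versions are checked below.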
static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other darwin such as WatchOS/TvOS is new enough.
  return true;
}

// Although this default value is arbitrary, it is not random. It is assumed
// that a condition that evaluates the same way by a higher percentage than this
// is best represented as control flow. Therefore, the default value N should be
// set such that the win from N% correct executions is greater than the loss
// from (100 - N)% mispredicted executions for the majority of intended targets.
static cl::opt<int> MinPercentageForPredictableBranch(
    "min-predictable-branch", cl::init(99),
    cl::desc("Minimum percentage (0-100) that a condition must be either true "
             "or false to assume that the condition is predictable"),
    cl::Hidden);

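/// Set default libcall names and calling conventions, then apply the OS- and
/// architecture-specific overrides below.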
void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

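/// Return the SYNC_* libcall for the given atomic opcode and width, or
/// UNKNOWN_LIBCALL if there is none.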
RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default:                                                                   \
      return UNKNOWN_LIBCALL;                                                  \
    case MVT::i8:                                                              \
      return Enum##_1;                                                         \
    case MVT::i16:                                                             \
      return Enum##_2;                                                         \
    case MVT::i32:                                                             \
      return Enum##_4;                                                         \
    case MVT::i64:                                                             \
      return Enum##_8;                                                         \
    case MVT::i128:                                                            \
      return Enum##_16;                                                        \
    }

  switch (Opc) {
  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
#undef OP_TO_LIBCALL
  }

  return UNKNOWN_LIBCALL;
}

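/// Return the MEMCPY_ELEMENT_UNORDERED_ATOMIC_* libcall for the given element
/// size, or UNKNOWN_LIBCALL for unsupported sizes. The MEMMOVE and MEMSET
/// variants below follow the same pattern.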
RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
  CCs[RTLIB::O_F128] = ISD::SETEQ;
  CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  HasFloatingPointExceptions = true;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  MinFunctionAlignment = 0;
  PrefFunctionAlignment = 0;
  PrefLoopAlignment = 0;
  GatherAllAliasesMaxDepth = 18;
  MinStackArgumentAlignment = 1;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

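/// Reset every operation action to its default: the zeroed action tables mean
/// Legal, and the operations that most targets cannot handle natively are then
/// explicitly marked Expand. Targets override individual entries afterwards.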
void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMINIMUM, VT, Expand);
    setOperationAction(ISD::FMAXIMUM, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);
    setOperationAction(ISD::FSHL, VT, Expand);
    setOperationAction(ISD::FSHR, VT, Expand);
    setOperationAction(ISD::SADDSAT, VT, Expand);
    setOperationAction(ISD::UADDSAT, VT, Expand);
    setOperationAction(ISD::SSUBSAT, VT, Expand);
    setOperationAction(ISD::USUBSAT, VT, Expand);
    setOperationAction(ISD::SMULFIX, VT, Expand);

    // Overflow operations default to expand.
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand.
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
    }

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FCBRT, VT, Expand);
    setOperationAction(ISD::FLOG , VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP , VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FROUND, VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}

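/// By default the shift amount is as wide as a pointer in address space 0.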
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}

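/// Vector shifts keep the vector operand type; scalar shifts use either the
/// target's scalar shift-amount type or the pointer type, depending on
/// whether types have already been legalized.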
EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
                    : getPointerTy(DL);
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA,
                          EVT::getVectorVT(Context, SVT.getVectorElementType(),
                                           SVT.getVectorNumElements() / 2));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts == 1)
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = (unsigned)NextPowerOf2(NumElts);
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
                          EVT::getVectorVT(Context, EltVT, NumElts / 2));

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64bit element on 32bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = (unsigned)NextPowerOf2(NumElts);

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
  return LegalizeKind(TypeSplitVector, NVT);
}

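/// MVT-only counterpart of getVectorTypeBreakdown below: compute the
/// intermediate vector type, how many of them are needed, and the register
/// type each one is ultimately carried in.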
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineInstr *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  // MI changes inside this loop as we grow operands.
  for (unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
    MachineOperand &MO = MI->getOperand(OperIdx);
    if (!MO.isFI())
      continue;

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();
    MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

    // Copy operands before the frame-index.
    for (unsigned i = 0; i < OperIdx; ++i)
      MIB.add(MI->getOperand(i));
    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    }

    // Copy the operands after the frame index.
    for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
      MIB.add(MI->getOperand(i));

    // Inherit previous memory operands.
    MIB.cloneMemRefs(*MI);
    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    auto Flags = MachineMemOperand::MOLoad;
    if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
      Flags |= MachineMemOperand::MOStore;
      Flags |= MachineMemOperand::MOVolatile;
    }
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags,
        MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
    MIB->addMemOperand(MF, MMO);

    // Replace the instruction and update the operand index.
    MBB->insert(MachineBasicBlock::iterator(MI), MIB);
    OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
    MI->eraseFromParent();
    MI = MIB;
  }
  return MI;
}

MachineInstr *
TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
         "Called emitXRayCustomEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MIB;
}

MachineInstr *
TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
         "Called emitXRayTypedEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MIB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128 support,
  // expand it to i128 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::f16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    bool IsLegalWiderType = false;
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger:
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
            SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;

    case TypeWidenVector:
      // Try to widen the vector.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        if (SVT.getVectorElementType() == EltVT
            && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          // Set type action according to the number of elements.
          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
                                                        : TypeSplitVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386 the representative of i8,
  // i16, and i32 would be GR32; on x86_64 it's GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                    EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  unsigned NumElts = VT.getVectorNumElements();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(
                                   EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;
  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlignment(Ty);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace,
                                            unsigned Alignment,
                                            bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlignment(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}

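/// The probability threshold above which a branch is considered predictable,
/// derived from the -min-predictable-branch command-line option.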
BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
  return BranchProbability(MinPercentageForPredictableBranch, 100);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

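/// Map an IR instruction opcode to the corresponding SelectionDAG opcode,
/// returning 0 for IR constructs that have no single ISD equivalent.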
int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

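/// Estimate the cost of legalizing a type: the returned count doubles on each
/// split or integer expansion, and the accompanying MVT is the type the
/// legalizer finally settles on.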
std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}

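/// Return (creating it if needed) the global through which the SafeStack
/// runtime publishes the unsafe stack pointer; the corresponding definition
/// lives in compiler-rt, as the comment below notes.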
Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                                              bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                     StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
  // The default implementation of this implements a conservative RISCy, r+r
  // and r+i addr mode.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;
  // Only support r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r.
    return false;
  }

  return true;
}
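// Examples (illustrative): under this default, BaseReg + 100 (Scale = 0) and
// BaseReg + ScaleReg (Scale = 1, BaseOffs = 0) are legal, while
// BaseReg + ScaleReg + 4 (Scale = 1, BaseOffs = 4) and 3 * ScaleReg
// (Scale = 3) are rejected.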
//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//
// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    return M.getOrInsertGlobal("__guard_local", PtrTy);
  }
  return nullptr;
}
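// On OpenBSD the guard reference above corresponds to IR like (illustrative):
//   @__guard_local = external global i8*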
// Currently we only support the "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard"))
    new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                       GlobalVariable::ExternalLinkage,
                       nullptr, "__stack_chk_guard");
}
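// The declaration inserted above corresponds to IR like (illustrative):
//   @__stack_chk_guard = external global i8*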
// Currently we only support the "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}
// No separate guard-check function by default; targets may override this hook.
Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}
unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}
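// Typical use (hedged sketch; "MyTargetLowering" is a hypothetical backend,
// not part of this file): a target can tune these thresholds from its
// TargetLowering constructor, e.g.
//   MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
//       : TargetLowering(TM) {
//     setMinimumJumpTableEntries(8); // prefer branches for small switches
//   }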
//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//
/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}
/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}
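// Examples of the names produced above: f32 sqrt -> "sqrtf", f64 div ->
// "divd", v4f32 sqrt -> "vec-sqrtf", v2f64 div -> "vec-divd".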
/// Return the character position and value (a single numeric character) of a
/// customized refinement operation in the input string if it exists. Return
/// false if there is no customized refinement step count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (RefStepChar >= '0' && RefStepChar <= '9') {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}
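// Example (illustrative): parseRefinementStep("vec-sqrtf:3", Pos, Val) sets
// Pos = 9 and Val = 3 and returns true; "vec-sqrtf" alone returns false; a
// malformed step such as "sqrtf:99" aborts with the fatal error above.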
/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    // Ignore an optional refinement step setting when matching names.
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
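// Example (illustrative): with Override = "!divf,vec-sqrtf:2",
// getOpEnabled(false, f32, ...) returns Disabled (the "!divf" entry) and
// getOpEnabled(true, v4f32, ...) returns Enabled; a type not named, such as
// f64 sqrt, yields Unspecified.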
/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
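// Example (illustrative): with Override = "sqrt:2,vec-divf:1",
// getOpRefinementSteps(true, f32, ...) returns 2 via the size-agnostic
// "sqrt" match, while getOpRefinementSteps(false, v4f32, ...) returns 1.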
int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}
void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}