//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <cstring>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option is only used to test whether strict FP operations are
// processed correctly, by preventing strict FP operations from being mutated
// into normal FP operations during development. Once the backend supports
// strict FP operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float nodes into normal FP nodes"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // Macos < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other darwin such as WatchOS/TvOS is new enough.
  return true;
}

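// Note: darwinHasSinCos() gates the registration of the __sincosf_stret /
// __sincos_stret libcalls in InitLibcalls() below.
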
// Although this default value is arbitrary, it is not random. It is assumed
// that a condition that evaluates the same way by a higher percentage than this
// is best represented as control flow. Therefore, the default value N should be
// set such that the win from N% correct executions is greater than the loss
// from (100 - N)% mispredicted executions for the majority of intended targets.
static cl::opt<int> MinPercentageForPredictableBranch(
    "min-predictable-branch", cl::init(99),
    cl::desc("Minimum percentage (0-100) that a condition must be either true "
             "or false to assume that the condition is predictable"),
    cl::Hidden);

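// Worked example: with the default value of 99, getPredictableBranchThreshold()
// (defined later in this file) reports BranchProbability(99, 100), so a branch
// is treated as predictable only if it goes the same way in at least 99% of
// executions.
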
void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.getArch() == Triple::ppc || TT.isPPC64()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS4CPU()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }
}

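// Note: any of these defaults can be overridden per-target from a backend's
// TargetLowering constructor. A minimal sketch (hypothetical target code):
//   setLibcallName(RTLIB::SINCOS_F32, "__mytarget_sincosf");
//   setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::Fast);
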
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}

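// For example, getFPEXT(MVT::f32, MVT::f64) yields FPEXT_F32_F64, whose
// default libcall name (from RuntimeLibcalls.def) is __extendsfdf2.
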
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                             \
  case Name:                                                                  \
    switch (VT.SimpleTy) {                                                    \
    default:                                                                  \
      return UNKNOWN_LIBCALL;                                                 \
    case MVT::i8:                                                             \
      return Enum##_1;                                                        \
    case MVT::i16:                                                            \
      return Enum##_2;                                                        \
    case MVT::i32:                                                            \
      return Enum##_4;                                                        \
    case MVT::i64:                                                            \
      return Enum##_8;                                                        \
    case MVT::i128:                                                           \
      return Enum##_16;                                                       \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

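// For example, getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) yields
// SYNC_FETCH_AND_ADD_4, which corresponds to the __sync_fetch_and_add_4
// runtime routine named in RuntimeLibcalls.def.
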
RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

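// Each of the three getters above simply selects the libcall variant whose
// suffix matches the element size, e.g. getMEMCPY_ELEMENT_UNORDERED_ATOMIC(4)
// yields MEMCPY_ELEMENT_UNORDERED_ATOMIC_4.
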
/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

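// These condition codes record how a soft-float comparison libcall's integer
// result must be tested: e.g. CCs[OEQ_F32] == SETEQ means "a == b" holds iff
// the libcall's return value compares equal to zero.
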
/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits().getFixedSize());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMINIMUM, VT, Expand);
    setOperationAction(ISD::FMAXIMUM, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);
    setOperationAction(ISD::FSHL, VT, Expand);
    setOperationAction(ISD::FSHR, VT, Expand);
    setOperationAction(ISD::SADDSAT, VT, Expand);
    setOperationAction(ISD::UADDSAT, VT, Expand);
    setOperationAction(ISD::SSUBSAT, VT, Expand);
    setOperationAction(ISD::USUBSAT, VT, Expand);
    setOperationAction(ISD::SMULFIX, VT, Expand);
    setOperationAction(ISD::SMULFIXSAT, VT, Expand);
    setOperationAction(ISD::UMULFIX, VT, Expand);
    setOperationAction(ISD::UMULFIXSAT, VT, Expand);
    setOperationAction(ISD::SDIVFIX, VT, Expand);
    setOperationAction(ISD::SDIVFIXSAT, VT, Expand);
    setOperationAction(ISD::UDIVFIX, VT, Expand);
    setOperationAction(ISD::UDIVFIXSAT, VT, Expand);

    // Overflow operations default to expand
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Expand);
    }

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reductions default to expand.
    setOperationAction(ISD::VECREDUCE_FADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_ADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_AND, VT, Expand);
    setOperationAction(ISD::VECREDUCE_OR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FCBRT, VT, Expand);
    setOperationAction(ISD::FLOG , VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP , VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::LROUND, VT, Expand);
    setOperationAction(ISD::LLROUND, VT, Expand);
    setOperationAction(ISD::LRINT, VT, Expand);
    setOperationAction(ISD::LLRINT, VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}

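// Targets refine these defaults in their own TargetLowering constructors.
// A minimal sketch (hypothetical target code):
//   setOperationAction(ISD::SMIN, MVT::i32, Legal);  // native smin available
//   setOperationAction(ISD::FSHL, MVT::i64, Custom); // funnel shift, lowered by hand
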
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
                    : getPointerTy(DL);
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA,
                          EVT::getVectorVT(Context, SVT.getVectorElementType(),
                                           SVT.getVectorElementCount() / 2));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts == 1)
    return LegalizeKind(TypeScalarizeVector, EltVT);

  if (VT.getVectorElementCount() == ElementCount(1, true))
    report_fatal_error("Cannot legalize this vector");

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.NextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
                          EVT::getVectorVT(Context, EltVT, NumElts / 2));

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64bit element on 32bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.NextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorElementCount() / 2);
  return LegalizeKind(TypeSplitVector, NVT);
}

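/// Helper for computeRegisterProperties: break \p VT into IntermediateVT
/// pieces and report how many RegisterVT registers are needed, mirroring the
/// EVT-based getVectorTypeBreakdown() defined later in this file.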
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.Min))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.Min)) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.Min;
    EC = EC / NumVectorRegs;
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.Min > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC / 2;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits().getFixedSize();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(LaneSizeInBits))
    LaneSizeInBits = NextPowerOf2(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;

  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs *
           (LaneSizeInBits / DestVT.getScalarSizeInBits().getFixedSize());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineInstr *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  if (!llvm::any_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MI;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (auto &MO : MI->operands()) {
    if (!MO.isFI()) {
      MIB.add(MO);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MIB;
}

MachineBasicBlock *
TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
         "Called emitXRayCustomEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
         "Called emitXRayTypedEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128 support,
  // expand it to i128 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    if (softPromoteHalfType()) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT = IsScalable ?
                                   MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
                                   MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.Min)) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType) nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().Min > EC.Min && isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.Min > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.Scalable
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386, i8, i16, and i32
  // representative would be GR32; while on x86_64 it's GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                    EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (EltCnt.Min != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so handle the legalisation of the
  // types like done elsewhere in SelectionDAG.
  if (VT.isScalableVector() && !isPowerOf2_32(EltCnt.Min)) {
    LegalizeKind LK;
    EVT PartVT = VT;
    do {
      // Iterate until we've found a legal (part) type to hold VT.
      LK = getTypeConversion(Context, PartVT);
      PartVT = LK.second;
    } while (LK.first != TypeLegal);

    NumIntermediates =
        VT.getVectorElementCount().Min / PartVT.getVectorElementCount().Min;

    // FIXME: This code needs to be extended to handle more complex vector
    // breakdowns, like nxv7i64 -> nxv8i64 -> 4 x nxv2i64. Currently the only
    // supported cases are vectors that are broken down into equal parts
    // such as nxv6i64 -> 3 x nxv2i64.
    assert(NumIntermediates * PartVT.getVectorElementCount().Min ==
               VT.getVectorElementCount().Min &&
           "Expected an integer multiple of PartVT");
    IntermediateVT = PartVT;
    RegisterVT = getRegisterType(Context, IntermediateVT);
    return NumIntermediates;
  }

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
  // we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.Min)) {
    NumVectorRegs = EltCnt.Min;
    EltCnt.Min = 1;
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (EltCnt.Min > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
    EltCnt.Min >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;

  if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
    TypeSize NewVTSize = NewVT.getSizeInBits();
    // Convert sizes such as i33 to i64.
    if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
      NewVTSize = NewVTSize.NextPowerOf2();
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
  }

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but the
  // minimum size is not checked. It would be nice if the minimum size check
  // were also combined within this function. Currently, the minimum size check
  // is performed in findJumpTable() in SelectionDAGBuilder and
  // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  const bool OptForSize =
      SI->getParent()->getParent()->hasOptSize() ||
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}

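// Worked example: a switch with 60 cases spanning a range of 300 values has a
// density of 20%; with the default jump-table-density of 10 it qualifies,
// since 60 * 100 >= 300 * 10.
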
/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlign(Ty).value();
}

1574 bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1575 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
1576 Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by the hardware implementation.
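  // For example, a naturally aligned i32 access (Alignment >= 4 under common
  // ABIs) is accepted below and reported as fast; anything less falls
  // through to allowsMisalignedMemoryAccesses.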
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlign(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment.value(), Flags,
                                        Fast);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT,
    const MachineMemOperand &MMO, bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                        MMO.getAlign(), MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace, Align Alignment,
                                            MachineMemOperand::Flags Flags,
                                            bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
  return BranchProbability(MinPercentageForPredictableBranch, 100);
}

//===----------------------------------------------------------------------===//
// TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case CallBr:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  case Freeze:         return ISD::FREEZE;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
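  // For example, on a target whose widest legal vector is 128 bits, v16i32
  // is split to v8i32 and then v4i32, doubling the cost at each step and
  // yielding the pair {4, v4i32}.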
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}

Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                                              bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
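  // When this function creates the variable itself (assuming UseTLS), the
  // resulting declaration is roughly equivalent to the IR:
  //   @__safestack_unsafe_stack_ptr =
  //       external thread_local(initialexec) global i8*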
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                             StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
// Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
  // The default implementation of this implements a conservative RISCy, r+r and
  // r+i addr mode.
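  // Concretely, the checks below accept the forms i, r, r+i, r+r, and 2*r
  // (treated as r+r), and reject any global base, any scale other than
  // 0, 1, or 2, and combinations such as r+r+i or 2*r+i.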

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:  // Don't allow n * r
    return false;
  }

  return true;
}

//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG can handle SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
    if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
      G->setVisibility(GlobalValue::HiddenVisibility);
    return C;
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard"))
    new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                       GlobalVariable::ExternalLinkage,
                       nullptr, "__stack_chk_guard");
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}

Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

bool TargetLoweringBase::isJumpTableRelative() const {
  return getTargetMachine().isPositionIndependent();
}

//===----------------------------------------------------------------------===//
// Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
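/// Other names produced here include "divd" (scalar f64 division), "sqrtf"
/// (scalar f32 square root), and "vec-sqrtd" (square root of a vXf64).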
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";
  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }
  return Name;
}

/// Return the character position and value (a single numeric character) of a
/// customized refinement operation in the input string if it exists. Return
/// false if there is no customized refinement step count.
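/// For example, parsing In = "vec-divf:2" sets Position to 8 (the ':') and
/// Value to 2 and returns true, while In = "vec-divf" returns false.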
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (RefStepChar >= '0' && RefStepChar <= '9') {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
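/// For example, an attribute string of "all" enables every estimate, while
/// "!sqrtf,vec-divf:2" disables the scalar f32 square-root estimate and
/// enables the vXf32 division estimate; the ":2" refinement-step suffix is
/// parsed separately by getOpRefinementSteps.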
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}

MachineMemOperand::Flags
TargetLoweringBase::getLoadMemOperandFlags(const LoadInst &LI,
                                           const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
  if (LI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (LI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  if (LI.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;

  if (isDereferenceablePointer(LI.getPointerOperand(), LI.getType(), DL))
    Flags |= MachineMemOperand::MODereferenceable;

  Flags |= getTargetMMOFlags(LI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
                                            const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;

  if (SI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (SI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(SI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
                                             const DataLayout &DL) const {
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
    if (RMW->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
    if (CmpX->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else
    llvm_unreachable("not an atomic instruction");

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(AI);
  return Flags;
}

//===----------------------------------------------------------------------===//
//  GlobalISel Hooks
//===----------------------------------------------------------------------===//

bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
                                        const TargetTransformInfo *TTI) const {
  auto &MF = *MI.getMF();
  auto &MRI = MF.getRegInfo();
  // Assuming a spill and reload of a value has a cost of 1 instruction each,
  // this helper function computes the maximum number of uses we should consider
  // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
  // break even in terms of code size when the original MI has 2 users vs
  // choosing to potentially spill. Any more than 2 users and we have a net code
  // size increase. This doesn't take into account register pressure though.
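  // In other words: a remat cost of 1 means localize regardless of use
  // count, a cost of 2 breaks even at 2 uses, and anything costlier is
  // only worth localizing for a single user.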
  auto maxUses = [](unsigned RematCost) {
    // A cost of 1 means remats are basically free.
    if (RematCost == 1)
      return UINT_MAX;
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;
    llvm_unreachable("Unexpected remat cost");
  };

  // Helper to walk through uses and terminate if we've reached a limit. Saves
  // us spending time traversing uses if all we want to know is if it's >= min.
  auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
    unsigned NumUses = 0;
    auto UI = MRI.use_instr_nodbg_begin(Reg), UE = MRI.use_instr_nodbg_end();
    for (; UI != UE && NumUses < MaxUses; ++UI)
      NumUses++;
    // If we haven't reached the end yet then there are more than MaxUses users.
    return UI == UE;
  };

  switch (MI.getOpcode()) {
  default:
    return false;
  // Constant-like instructions should be close to their users.
  // We don't want long live-ranges for them.
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_INTTOPTR:
    return true;
  case TargetOpcode::G_GLOBAL_VALUE: {
    unsigned RematCost = TTI->getGISelRematGlobalCost();
    Register Reg = MI.getOperand(0).getReg();
    unsigned MaxUses = maxUses(RematCost);
    if (MaxUses == UINT_MAX)
      return true; // Remats are "free" so always localize.
    bool B = isUsesAtMost(Reg, MaxUses);
    return B;
  }
  }
}