//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;
static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(0), cl::Hidden,
   cl::desc("Set maximum size of jump tables; zero for no limit."));
/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));
// Although this default value is arbitrary, it is not random. It is assumed
// that a condition that evaluates the same way by a higher percentage than
// this is best represented as control flow. Therefore, the default value N
// should be set such that the win from N% correct executions is greater than
// the loss from (100 - N)% mispredicted executions for the majority of
// intended targets.
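// For example, at the default N = 99 a branch must go the same way in at
// least 99 of every 100 executions; the savings from those 99 cheap,
// correctly-predicted runs must outweigh the cost of the single
// misprediction for control flow to beat a select.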
static cl::opt<int> MinPercentageForPredictableBranch(
    "min-predictable-branch", cl::init(99),
    cl::desc("Minimum percentage (0-100) that a condition must be either true "
             "or false to assume that the condition is predictable"),
    cl::Hidden);
/// InitLibcallNames - Set default libcall names.
static void InitLibcallNames(const char **Names, const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  Names[RTLIB::code] = name;
#include "llvm/CodeGen/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
    Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
  } else {
    Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
    Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia()) {
    Names[RTLIB::SINCOS_F32] = "sincosf";
    Names[RTLIB::SINCOS_F64] = "sincos";
    Names[RTLIB::SINCOS_F80] = "sincosl";
    Names[RTLIB::SINCOS_F128] = "sincosl";
    Names[RTLIB::SINCOS_PPCF128] = "sincosl";
  }

  if (TT.isOSOpenBSD()) {
    Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
  }
}
/// Set default libcall CallingConvs.
static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    CCs[LC] = CallingConv::C;
}
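// Targets whose runtime libraries use a different convention override these
// afterwards, e.g. via setLibcallCallingConv() in their own TargetLowering
// constructor.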
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  }

  return UNKNOWN_LIBCALL;
}
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  }

  return UNKNOWN_LIBCALL;
}
/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}
/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}
/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}
/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}
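// Map an atomic SelectionDAG opcode and memory width to the corresponding
// __sync_* libcall; e.g. (ISD::ATOMIC_LOAD_ADD, MVT::i32) selects
// SYNC_FETCH_AND_ADD_4, i.e. "__sync_fetch_and_add_4".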
RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default: return UNKNOWN_LIBCALL;                                           \
    case MVT::i8:   return Enum##_1;                                           \
    case MVT::i16:  return Enum##_2;                                           \
    case MVT::i32:  return Enum##_4;                                           \
    case MVT::i64:  return Enum##_8;                                           \
    case MVT::i128: return Enum##_16;                                          \
    }

  switch (Opc) {
  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
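// The element-wise atomic memory intrinsics map to a libcall chosen purely by
// element size; e.g. a 4-byte element selects the
// *_ELEMENT_UNORDERED_ATOMIC_4 flavor.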
RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:  return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16: return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default: return UNKNOWN_LIBCALL;
  }
}
RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:  return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16: return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default: return UNKNOWN_LIBCALL;
  }
}
RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:  return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:  return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:  return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:  return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16: return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default: return UNKNOWN_LIBCALL;
  }
}
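// The soft-float comparison libcalls return an int that must be compared
// against zero with the condition code recorded here; e.g. __eqsf2 returns
// zero iff its operands are equal, so its result is tested with SETEQ.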
/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
  CCs[RTLIB::O_F128] = ISD::SETEQ;
  CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
}
/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  HasFloatingPointExceptions = true;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  MinFunctionAlignment = 0;
  PrefFunctionAlignment = 0;
  PrefLoopAlignment = 0;
  GatherAllAliasesMaxDepth = 18;
  MinStackArgumentAlignment = 1;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
  InitLibcallCallingConvs(LibcallCallingConvs);
}
void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNAN, VT, Expand);
    setOperationAction(ISD::FMAXNAN, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);

    // Overflow operations default to expand.
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand.
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
    }

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FLOG , VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP , VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FROUND, VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}
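// A target then selectively overrides these defaults in its own constructor;
// e.g. a target with a native population-count instruction would mark it
// legal with setOperationAction(ISD::CTPOP, MVT::i32, Legal).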
MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}
EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
                                         const DataLayout &DL) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return getScalarShiftAmountTy(DL, LHSTy);
}
bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  // Integer division and remainder can trap, e.g. on division by zero.
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  default:
    return false;
  }
}
void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}
TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA,
                          EVT::getVectorVT(Context, SVT.getVectorElementType(),
                                           SVT.getVectorNumElements() / 2));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts == 1)
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = (unsigned)NextPowerOf2(NumElts);
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
                          EVT::getVectorVT(Context, EltVT, NumElts / 2));

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64bit element on 32bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = (unsigned)NextPowerOf2(NumElts);

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
  return LegalizeKind(TypeSplitVector, NVT);
}
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (NewVTSize / DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}
/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineInstr *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  // MI changes inside this loop as we grow operands.
  for (unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
    MachineOperand &MO = MI->getOperand(OperIdx);
    if (!MO.isFI())
      continue;

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();
    MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

    // Copy operands before the frame-index.
    for (unsigned i = 0; i < OperIdx; ++i)
      MIB.add(MI->getOperand(i));
    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    }

    // Copy the operands after the frame index.
    for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
      MIB.add(MI->getOperand(i));

    // Inherit previous memory operands.
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    auto Flags = MachineMemOperand::MOLoad;
    if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
      Flags |= MachineMemOperand::MOStore;
      Flags |= MachineMemOperand::MOVolatile;
    }
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags,
        MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
    MIB->addMemOperand(MF, MMO);

    // Replace the instruction and update the operand index.
    MBB->insert(MachineBasicBlock::iterator(MI), MIB);
    OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
    MI->eraseFromParent();
    MI = MIB;
  }
  return MI;
}
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueType's smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (const MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }
  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128
  // support, expand it to i128 and we will be generating soft float library
  // calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::f16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
  }
  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    bool IsLegalWiderType = false;
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger:
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
            SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;

    case TypeWidenVector:
      // Try to widen the vector.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        if (SVT.getVectorElementType() == EltVT
            && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          // Set type action according to the number of elements.
          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
                                                        : TypeSplitVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386, the representative for i8,
  // i16, and i32 would be GR32; while on x86_64 it's GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}
EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}
/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                    EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  unsigned NumElts = VT.getVectorNumElements();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;
  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (NewVTSize / DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
  }
}
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlignment(Ty);
}
bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace,
                                            unsigned Alignment,
                                            bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlignment(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}

BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
  return BranchProbability(MinPercentageForPredictableBranch, 100);
}
//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//
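// Map an LLVM IR instruction opcode to the equivalent SelectionDAG node type.
// A return value of 0 means the instruction has no one-to-one ISD counterpart
// (e.g. Br, Alloca, and Call are lowered through dedicated paths instead).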
int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret: return 0;
  case Br: return 0;
  case Switch: return 0;
  case IndirectBr: return 0;
  case Invoke: return 0;
  case Resume: return 0;
  case Unreachable: return 0;
  case CleanupRet: return 0;
  case CatchRet: return 0;
  case CatchPad: return 0;
  case CatchSwitch: return 0;
  case CleanupPad: return 0;
  case Add: return ISD::ADD;
  case FAdd: return ISD::FADD;
  case Sub: return ISD::SUB;
  case FSub: return ISD::FSUB;
  case Mul: return ISD::MUL;
  case FMul: return ISD::FMUL;
  case UDiv: return ISD::UDIV;
  case SDiv: return ISD::SDIV;
  case FDiv: return ISD::FDIV;
  case URem: return ISD::UREM;
  case SRem: return ISD::SREM;
  case FRem: return ISD::FREM;
  case Shl: return ISD::SHL;
  case LShr: return ISD::SRL;
  case AShr: return ISD::SRA;
  case And: return ISD::AND;
  case Or: return ISD::OR;
  case Xor: return ISD::XOR;
  case Alloca: return 0;
  case Load: return ISD::LOAD;
  case Store: return ISD::STORE;
  case GetElementPtr: return 0;
  case Fence: return 0;
  case AtomicCmpXchg: return 0;
  case AtomicRMW: return 0;
  case Trunc: return ISD::TRUNCATE;
  case ZExt: return ISD::ZERO_EXTEND;
  case SExt: return ISD::SIGN_EXTEND;
  case FPToUI: return ISD::FP_TO_UINT;
  case FPToSI: return ISD::FP_TO_SINT;
  case UIToFP: return ISD::UINT_TO_FP;
  case SIToFP: return ISD::SINT_TO_FP;
  case FPTrunc: return ISD::FP_ROUND;
  case FPExt: return ISD::FP_EXTEND;
  case PtrToInt: return ISD::BITCAST;
  case IntToPtr: return ISD::BITCAST;
  case BitCast: return ISD::BITCAST;
  case AddrSpaceCast: return ISD::ADDRSPACECAST;
  case ICmp: return ISD::SETCC;
  case FCmp: return ISD::SETCC;
  case PHI: return 0;
  case Call: return 0;
  case Select: return ISD::SELECT;
  case UserOp1: return 0;
  case UserOp2: return 0;
  case VAArg: return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement: return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector: return ISD::VECTOR_SHUFFLE;
  case ExtractValue: return ISD::MERGE_VALUES;
  case InsertValue: return ISD::MERGE_VALUES;
  case LandingPad: return 0;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}
std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}
Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                                              bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}
Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                     StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}
//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
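/// For example, the default implementation below accepts "r+r", "r+i" with a
/// sign-extended 16-bit immediate, and "2*r" (treated as "r+r"), but rejects
/// any mode that uses a global as a base or scales a register by more than 2.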
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
  // The default implementation of this implements a conservative RISCy, r+r
  // and r+i addressing mode.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r
    return false;
  }

  return true;
}
//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    return M.getOrInsertGlobal("__guard_local", PtrTy);
  }
  return nullptr;
}
1553 // TODO: add LOAD_STACK_GUARD support.
1554 void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
1555 M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext()));
1558 // Currently only support "standard" __stack_chk_guard.
1559 // TODO: add LOAD_STACK_GUARD support.
1560 Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
1561 return M.getGlobalVariable("__stack_chk_guard", true);
1564 Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}
//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}
/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
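/// Likewise, scalar f32 division is "divf", f64 square root is "sqrtd", and
/// vector f32 square root is "vec-sqrtf".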
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}
/// Return the character position and value (a single numeric character) of a
/// customized refinement operation in the input string if it exists. Return
/// false if there is no customized refinement step count.
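/// For example, for the input "sqrtf:3", Position is set to 5 and Value to 3.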
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step amount.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (RefStepChar >= '0' && RefStepChar <= '9') {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}
/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
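/// For example, the attribute string "!sqrtf,vec-divf" disables the scalar
/// f32 square-root estimate and enables the vector f32 division estimate,
/// leaving everything else at the target default.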
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  SplitString(Override, OverrideVector, ",");
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  SplitString(Override, OverrideVector, ",");
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}
void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}