//===-- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(0), cl::Hidden,
   cl::desc("Set maximum size of jump tables; zero for no limit."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// Although this default value is arbitrary, it is not random. It is assumed
// that a condition that evaluates the same way by a higher percentage than this
// is best represented as control flow. Therefore, the default value N should be
// set such that the win from N% correct executions is greater than the loss
// from (100 - N)% mispredicted executions for the majority of intended targets.
static cl::opt<int> MinPercentageForPredictableBranch(
    "min-predictable-branch", cl::init(99),
    cl::desc("Minimum percentage (0-100) that a condition must be either true "
             "or false to assume that the condition is predictable"),
    cl::Hidden);

/// InitLibcallNames - Set default libcall names.
static void InitLibcallNames(const char **Names, const Triple &TT) {
  Names[RTLIB::SHL_I16] = "__ashlhi3";
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SHL_I128] = "__ashlti3";
  Names[RTLIB::SRL_I16] = "__lshrhi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRL_I128] = "__lshrti3";
  Names[RTLIB::SRA_I16] = "__ashrhi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::SRA_I128] = "__ashrti3";
  Names[RTLIB::MUL_I8] = "__mulqi3";
  Names[RTLIB::MUL_I16] = "__mulhi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::MUL_I128] = "__multi3";
  Names[RTLIB::MULO_I32] = "__mulosi4";
  Names[RTLIB::MULO_I64] = "__mulodi4";
  Names[RTLIB::MULO_I128] = "__muloti4";
  Names[RTLIB::SDIV_I8] = "__divqi3";
  Names[RTLIB::SDIV_I16] = "__divhi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::SDIV_I128] = "__divti3";
  Names[RTLIB::UDIV_I8] = "__udivqi3";
  Names[RTLIB::UDIV_I16] = "__udivhi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::UDIV_I128] = "__udivti3";
  Names[RTLIB::SREM_I8] = "__modqi3";
  Names[RTLIB::SREM_I16] = "__modhi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::SREM_I128] = "__modti3";
  Names[RTLIB::UREM_I8] = "__umodqi3";
  Names[RTLIB::UREM_I16] = "__umodhi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::UREM_I128] = "__umodti3";

  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_F80] = "__addxf3";
  Names[RTLIB::ADD_F128] = "__addtf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_F80] = "__subxf3";
  Names[RTLIB::SUB_F128] = "__subtf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_F80] = "__mulxf3";
  Names[RTLIB::MUL_F128] = "__multf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_F80] = "__divxf3";
  Names[RTLIB::DIV_F128] = "__divtf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_F80] = "fmodl";
  Names[RTLIB::REM_F128] = "fmodl";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::FMA_F32] = "fmaf";
  Names[RTLIB::FMA_F64] = "fma";
  Names[RTLIB::FMA_F80] = "fmal";
  Names[RTLIB::FMA_F128] = "fmal";
  Names[RTLIB::FMA_PPCF128] = "fmal";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_F128] = "__powitf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_F128] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::LOG_F32] = "logf";
  Names[RTLIB::LOG_F64] = "log";
  Names[RTLIB::LOG_F80] = "logl";
  Names[RTLIB::LOG_F128] = "logl";
  Names[RTLIB::LOG_PPCF128] = "logl";
  Names[RTLIB::LOG2_F32] = "log2f";
  Names[RTLIB::LOG2_F64] = "log2";
  Names[RTLIB::LOG2_F80] = "log2l";
  Names[RTLIB::LOG2_F128] = "log2l";
  Names[RTLIB::LOG2_PPCF128] = "log2l";
  Names[RTLIB::LOG10_F32] = "log10f";
  Names[RTLIB::LOG10_F64] = "log10";
  Names[RTLIB::LOG10_F80] = "log10l";
  Names[RTLIB::LOG10_F128] = "log10l";
  Names[RTLIB::LOG10_PPCF128] = "log10l";
  Names[RTLIB::EXP_F32] = "expf";
  Names[RTLIB::EXP_F64] = "exp";
  Names[RTLIB::EXP_F80] = "expl";
  Names[RTLIB::EXP_F128] = "expl";
  Names[RTLIB::EXP_PPCF128] = "expl";
  Names[RTLIB::EXP2_F32] = "exp2f";
  Names[RTLIB::EXP2_F64] = "exp2";
  Names[RTLIB::EXP2_F80] = "exp2l";
  Names[RTLIB::EXP2_F128] = "exp2l";
  Names[RTLIB::EXP2_PPCF128] = "exp2l";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::SIN_F80] = "sinl";
  Names[RTLIB::SIN_F128] = "sinl";
  Names[RTLIB::SIN_PPCF128] = "sinl";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::COS_F80] = "cosl";
  Names[RTLIB::COS_F128] = "cosl";
  Names[RTLIB::COS_PPCF128] = "cosl";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_F128] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::CEIL_F32] = "ceilf";
  Names[RTLIB::CEIL_F64] = "ceil";
  Names[RTLIB::CEIL_F80] = "ceill";
  Names[RTLIB::CEIL_F128] = "ceill";
  Names[RTLIB::CEIL_PPCF128] = "ceill";
  Names[RTLIB::TRUNC_F32] = "truncf";
  Names[RTLIB::TRUNC_F64] = "trunc";
  Names[RTLIB::TRUNC_F80] = "truncl";
  Names[RTLIB::TRUNC_F128] = "truncl";
  Names[RTLIB::TRUNC_PPCF128] = "truncl";
  Names[RTLIB::RINT_F32] = "rintf";
  Names[RTLIB::RINT_F64] = "rint";
  Names[RTLIB::RINT_F80] = "rintl";
  Names[RTLIB::RINT_F128] = "rintl";
  Names[RTLIB::RINT_PPCF128] = "rintl";
  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
  Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
  Names[RTLIB::ROUND_F32] = "roundf";
  Names[RTLIB::ROUND_F64] = "round";
  Names[RTLIB::ROUND_F80] = "roundl";
  Names[RTLIB::ROUND_F128] = "roundl";
  Names[RTLIB::ROUND_PPCF128] = "roundl";
  Names[RTLIB::FLOOR_F32] = "floorf";
  Names[RTLIB::FLOOR_F64] = "floor";
  Names[RTLIB::FLOOR_F80] = "floorl";
  Names[RTLIB::FLOOR_F128] = "floorl";
  Names[RTLIB::FLOOR_PPCF128] = "floorl";
  Names[RTLIB::FMIN_F32] = "fminf";
  Names[RTLIB::FMIN_F64] = "fmin";
  Names[RTLIB::FMIN_F80] = "fminl";
  Names[RTLIB::FMIN_F128] = "fminl";
  Names[RTLIB::FMIN_PPCF128] = "fminl";
  Names[RTLIB::FMAX_F32] = "fmaxf";
  Names[RTLIB::FMAX_F64] = "fmax";
  Names[RTLIB::FMAX_F80] = "fmaxl";
  Names[RTLIB::FMAX_F128] = "fmaxl";
  Names[RTLIB::FMAX_PPCF128] = "fmaxl";
  Names[RTLIB::ROUND_F32] = "roundf";
  Names[RTLIB::ROUND_F64] = "round";
  Names[RTLIB::ROUND_F80] = "roundl";
  Names[RTLIB::ROUND_F128] = "roundl";
  Names[RTLIB::ROUND_PPCF128] = "roundl";
  Names[RTLIB::COPYSIGN_F32] = "copysignf";
  Names[RTLIB::COPYSIGN_F64] = "copysign";
  Names[RTLIB::COPYSIGN_F80] = "copysignl";
  Names[RTLIB::COPYSIGN_F128] = "copysignl";
  Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
  Names[RTLIB::FPEXT_F32_PPCF128] = "__gcc_stoq";
  Names[RTLIB::FPEXT_F64_PPCF128] = "__gcc_dtoq";
  Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
  Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
    Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
  } else {
    Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
    Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
  }
  Names[RTLIB::FPROUND_F64_F16] = "__truncdfhf2";
  Names[RTLIB::FPROUND_F80_F16] = "__truncxfhf2";
  Names[RTLIB::FPROUND_F128_F16] = "__trunctfhf2";
  Names[RTLIB::FPROUND_PPCF128_F16] = "__trunctfhf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
  Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
  Names[RTLIB::FPROUND_PPCF128_F32] = "__gcc_qtos";
  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
  Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
  Names[RTLIB::FPROUND_PPCF128_F64] = "__gcc_qtod";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
  Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__gcc_qtou";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
  Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
  Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__gcc_itoq";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
  Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
  Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
  Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__gcc_utoq";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
  Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
  Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::OEQ_F128] = "__eqtf2";
  Names[RTLIB::OEQ_PPCF128] = "__gcc_qeq";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::UNE_F128] = "__netf2";
  Names[RTLIB::UNE_PPCF128] = "__gcc_qne";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OGE_F128] = "__getf2";
  Names[RTLIB::OGE_PPCF128] = "__gcc_qge";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLT_F128] = "__lttf2";
  Names[RTLIB::OLT_PPCF128] = "__gcc_qlt";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OLE_F128] = "__letf2";
  Names[RTLIB::OLE_PPCF128] = "__gcc_qle";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::OGT_F128] = "__gttf2";
  Names[RTLIB::OGT_PPCF128] = "__gcc_qgt";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::UO_F128] = "__unordtf2";
  Names[RTLIB::UO_PPCF128] = "__gcc_qunord";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
  Names[RTLIB::O_F128] = "__unordtf2";
  Names[RTLIB::O_PPCF128] = "__gcc_qunord";
  Names[RTLIB::MEMCPY] = "memcpy";
  Names[RTLIB::MEMMOVE] = "memmove";
  Names[RTLIB::MEMSET] = "memset";
  Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_1] =
      "__llvm_memcpy_element_unordered_atomic_1";
  Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_2] =
      "__llvm_memcpy_element_unordered_atomic_2";
  Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_4] =
      "__llvm_memcpy_element_unordered_atomic_4";
  Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_8] =
      "__llvm_memcpy_element_unordered_atomic_8";
  Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_16] =
      "__llvm_memcpy_element_unordered_atomic_16";
  Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1] =
      "__llvm_memmove_element_unordered_atomic_1";
  Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2] =
      "__llvm_memmove_element_unordered_atomic_2";
  Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4] =
      "__llvm_memmove_element_unordered_atomic_4";
  Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8] =
      "__llvm_memmove_element_unordered_atomic_8";
  Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16] =
      "__llvm_memmove_element_unordered_atomic_16";
  Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_1] =
      "__llvm_memset_element_unordered_atomic_1";
  Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_2] =
      "__llvm_memset_element_unordered_atomic_2";
  Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_4] =
      "__llvm_memset_element_unordered_atomic_4";
  Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_8] =
      "__llvm_memset_element_unordered_atomic_8";
  Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_16] =
      "__llvm_memset_element_unordered_atomic_16";
  Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16] = "__sync_val_compare_and_swap_16";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_16] = "__sync_lock_test_and_set_16";
  Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
  Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
  Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
  Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
  Names[RTLIB::SYNC_FETCH_AND_ADD_16] = "__sync_fetch_and_add_16";
  Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
  Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
  Names[RTLIB::SYNC_FETCH_AND_SUB_16] = "__sync_fetch_and_sub_16";
  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
  Names[RTLIB::SYNC_FETCH_AND_AND_16] = "__sync_fetch_and_and_16";
  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
  Names[RTLIB::SYNC_FETCH_AND_OR_16] = "__sync_fetch_and_or_16";
  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
  Names[RTLIB::SYNC_FETCH_AND_XOR_16] = "__sync_fetch_and_xor_16";
  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
  Names[RTLIB::SYNC_FETCH_AND_NAND_16] = "__sync_fetch_and_nand_16";
  Names[RTLIB::SYNC_FETCH_AND_MAX_1] = "__sync_fetch_and_max_1";
  Names[RTLIB::SYNC_FETCH_AND_MAX_2] = "__sync_fetch_and_max_2";
  Names[RTLIB::SYNC_FETCH_AND_MAX_4] = "__sync_fetch_and_max_4";
  Names[RTLIB::SYNC_FETCH_AND_MAX_8] = "__sync_fetch_and_max_8";
  Names[RTLIB::SYNC_FETCH_AND_MAX_16] = "__sync_fetch_and_max_16";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_1] = "__sync_fetch_and_umax_1";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_2] = "__sync_fetch_and_umax_2";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_4] = "__sync_fetch_and_umax_4";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_8] = "__sync_fetch_and_umax_8";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_16] = "__sync_fetch_and_umax_16";
  Names[RTLIB::SYNC_FETCH_AND_MIN_1] = "__sync_fetch_and_min_1";
  Names[RTLIB::SYNC_FETCH_AND_MIN_2] = "__sync_fetch_and_min_2";
  Names[RTLIB::SYNC_FETCH_AND_MIN_4] = "__sync_fetch_and_min_4";
  Names[RTLIB::SYNC_FETCH_AND_MIN_8] = "__sync_fetch_and_min_8";
  Names[RTLIB::SYNC_FETCH_AND_MIN_16] = "__sync_fetch_and_min_16";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_1] = "__sync_fetch_and_umin_1";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_2] = "__sync_fetch_and_umin_2";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_4] = "__sync_fetch_and_umin_4";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_8] = "__sync_fetch_and_umin_8";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_16] = "__sync_fetch_and_umin_16";

  Names[RTLIB::ATOMIC_LOAD] = "__atomic_load";
  Names[RTLIB::ATOMIC_LOAD_1] = "__atomic_load_1";
  Names[RTLIB::ATOMIC_LOAD_2] = "__atomic_load_2";
  Names[RTLIB::ATOMIC_LOAD_4] = "__atomic_load_4";
  Names[RTLIB::ATOMIC_LOAD_8] = "__atomic_load_8";
  Names[RTLIB::ATOMIC_LOAD_16] = "__atomic_load_16";

  Names[RTLIB::ATOMIC_STORE] = "__atomic_store";
  Names[RTLIB::ATOMIC_STORE_1] = "__atomic_store_1";
  Names[RTLIB::ATOMIC_STORE_2] = "__atomic_store_2";
  Names[RTLIB::ATOMIC_STORE_4] = "__atomic_store_4";
  Names[RTLIB::ATOMIC_STORE_8] = "__atomic_store_8";
  Names[RTLIB::ATOMIC_STORE_16] = "__atomic_store_16";

  Names[RTLIB::ATOMIC_EXCHANGE] = "__atomic_exchange";
  Names[RTLIB::ATOMIC_EXCHANGE_1] = "__atomic_exchange_1";
  Names[RTLIB::ATOMIC_EXCHANGE_2] = "__atomic_exchange_2";
  Names[RTLIB::ATOMIC_EXCHANGE_4] = "__atomic_exchange_4";
  Names[RTLIB::ATOMIC_EXCHANGE_8] = "__atomic_exchange_8";
  Names[RTLIB::ATOMIC_EXCHANGE_16] = "__atomic_exchange_16";

  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE] = "__atomic_compare_exchange";
  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_1] = "__atomic_compare_exchange_1";
  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_2] = "__atomic_compare_exchange_2";
  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_4] = "__atomic_compare_exchange_4";
  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_8] = "__atomic_compare_exchange_8";
  Names[RTLIB::ATOMIC_COMPARE_EXCHANGE_16] = "__atomic_compare_exchange_16";

  Names[RTLIB::ATOMIC_FETCH_ADD_1] = "__atomic_fetch_add_1";
  Names[RTLIB::ATOMIC_FETCH_ADD_2] = "__atomic_fetch_add_2";
  Names[RTLIB::ATOMIC_FETCH_ADD_4] = "__atomic_fetch_add_4";
  Names[RTLIB::ATOMIC_FETCH_ADD_8] = "__atomic_fetch_add_8";
  Names[RTLIB::ATOMIC_FETCH_ADD_16] = "__atomic_fetch_add_16";
  Names[RTLIB::ATOMIC_FETCH_SUB_1] = "__atomic_fetch_sub_1";
  Names[RTLIB::ATOMIC_FETCH_SUB_2] = "__atomic_fetch_sub_2";
  Names[RTLIB::ATOMIC_FETCH_SUB_4] = "__atomic_fetch_sub_4";
  Names[RTLIB::ATOMIC_FETCH_SUB_8] = "__atomic_fetch_sub_8";
  Names[RTLIB::ATOMIC_FETCH_SUB_16] = "__atomic_fetch_sub_16";
  Names[RTLIB::ATOMIC_FETCH_AND_1] = "__atomic_fetch_and_1";
  Names[RTLIB::ATOMIC_FETCH_AND_2] = "__atomic_fetch_and_2";
  Names[RTLIB::ATOMIC_FETCH_AND_4] = "__atomic_fetch_and_4";
  Names[RTLIB::ATOMIC_FETCH_AND_8] = "__atomic_fetch_and_8";
  Names[RTLIB::ATOMIC_FETCH_AND_16] = "__atomic_fetch_and_16";
  Names[RTLIB::ATOMIC_FETCH_OR_1] = "__atomic_fetch_or_1";
  Names[RTLIB::ATOMIC_FETCH_OR_2] = "__atomic_fetch_or_2";
  Names[RTLIB::ATOMIC_FETCH_OR_4] = "__atomic_fetch_or_4";
  Names[RTLIB::ATOMIC_FETCH_OR_8] = "__atomic_fetch_or_8";
  Names[RTLIB::ATOMIC_FETCH_OR_16] = "__atomic_fetch_or_16";
  Names[RTLIB::ATOMIC_FETCH_XOR_1] = "__atomic_fetch_xor_1";
  Names[RTLIB::ATOMIC_FETCH_XOR_2] = "__atomic_fetch_xor_2";
  Names[RTLIB::ATOMIC_FETCH_XOR_4] = "__atomic_fetch_xor_4";
  Names[RTLIB::ATOMIC_FETCH_XOR_8] = "__atomic_fetch_xor_8";
  Names[RTLIB::ATOMIC_FETCH_XOR_16] = "__atomic_fetch_xor_16";
  Names[RTLIB::ATOMIC_FETCH_NAND_1] = "__atomic_fetch_nand_1";
  Names[RTLIB::ATOMIC_FETCH_NAND_2] = "__atomic_fetch_nand_2";
  Names[RTLIB::ATOMIC_FETCH_NAND_4] = "__atomic_fetch_nand_4";
  Names[RTLIB::ATOMIC_FETCH_NAND_8] = "__atomic_fetch_nand_8";
  Names[RTLIB::ATOMIC_FETCH_NAND_16] = "__atomic_fetch_nand_16";

  if (TT.isGNUEnvironment()) {
    Names[RTLIB::SINCOS_F32] = "sincosf";
    Names[RTLIB::SINCOS_F64] = "sincos";
    Names[RTLIB::SINCOS_F80] = "sincosl";
    Names[RTLIB::SINCOS_F128] = "sincosl";
    Names[RTLIB::SINCOS_PPCF128] = "sincosl";
  }

  if (!TT.isOSOpenBSD()) {
    Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
  }

  Names[RTLIB::DEOPTIMIZE] = "__llvm_deoptimize";
}

/// Set default libcall CallingConvs.
static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    CCs[LC] = CallingConv::C;
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  }

  return UNKNOWN_LIBCALL;
}
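
// For example, a legalizer softening an f32 -> f128 extension would query
// (sketch):
//   RTLIB::Libcall LC = RTLIB::getFPEXT(MVT::f32, MVT::f128);
//   // LC == FPEXT_F32_F128; getLibcallName(LC) yields "__extendsftf2"
//   // with the default table above.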

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  }

  return UNKNOWN_LIBCALL;
}
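
// The truncation direction is symmetric; e.g. rounding f64 down to f16
// resolves to FPROUND_F64_F16, which the default table maps to
// "__truncdfhf2" (sketch):
//   RTLIB::Libcall LC = RTLIB::getFPROUND(MVT::f64, MVT::f16);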

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}
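
// The four int<->FP helpers above all follow the same shape. For instance,
// a soft-float lowering of a u64 -> f32 conversion would use (sketch):
//   RTLIB::Libcall LC = RTLIB::getUINTTOFP(MVT::i64, MVT::f32);
//   // LC == UINTTOFP_I64_F32 -> "__floatundisf" by default.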

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                             \
  case Name:                                                                  \
    switch (VT.SimpleTy) {                                                    \
    default:                                                                  \
      return UNKNOWN_LIBCALL;                                                 \
    case MVT::i8:                                                             \
      return Enum##_1;                                                        \
    case MVT::i16:                                                            \
      return Enum##_2;                                                        \
    case MVT::i32:                                                            \
      return Enum##_4;                                                        \
    case MVT::i64:                                                            \
      return Enum##_8;                                                        \
    case MVT::i128:                                                           \
      return Enum##_16;                                                       \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
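
// For example, expanding a 32-bit atomic add to a __sync_* call (sketch):
//   RTLIB::Libcall LC = RTLIB::getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32);
//   // LC == SYNC_FETCH_AND_ADD_4 -> "__sync_fetch_and_add_4".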

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}
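
// These three getters select the libcall from the element size of an
// @llvm.mem*.element.unordered.atomic intrinsic; e.g. for a 4-byte element
// (sketch):
//   RTLIB::Libcall LC = RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(4);
//   // LC -> "__llvm_memcpy_element_unordered_atomic_4".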

/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
  CCs[RTLIB::O_F128] = ISD::SETEQ;
  CCs[RTLIB::O_PPCF128] = ISD::SETEQ;
}
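
// The stored condition code says how to interpret a comparison libcall's
// integer result against zero once the FP comparison has been softened.
// Roughly (sketch of the rewrite, not literal DAG construction code):
//   (setcc f32 x, y, oeq)
//     -> (setcc i32 (call "__eqsf2", x, y), 0, CmpLibcallCCs[OEQ_F32])
// where CmpLibcallCCs[OEQ_F32] is SETEQ, since __eqsf2 returns zero iff its
// arguments compare equal.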

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  HasFloatingPointExceptions = true;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  MinFunctionAlignment = 0;
  PrefFunctionAlignment = 0;
  PrefLoopAlignment = 0;
  GatherAllAliasesMaxDepth = 18;
  MinStackArgumentAlignment = 1;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);

  InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
  InitLibcallCallingConvs(LibcallCallingConvs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNAN, VT, Expand);
    setOperationAction(ISD::FMAXNAN, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);

    // Overflow operations default to expand
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
    }

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FLOG , VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP , VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FROUND, VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  //
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}
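
// Concrete backends then refine these defaults in their own TargetLowering
// constructors. A hypothetical target with native 32-bit smin/smax and legal
// f64 immediates might write, for example:
//   setOperationAction(ISD::SMIN, MVT::i32, Legal);
//   setOperationAction(ISD::SMAX, MVT::i32, Legal);
//   setOperationAction(ISD::ConstantFP, MVT::f64, Legal);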

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
                                         const DataLayout &DL) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return getScalarShiftAmountTy(DL, LHSTy);
}
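
// On a target whose address space 0 uses 64-bit pointers this default makes
// shift amounts i64; e.g. when legalizing (shl i32 %x, %n) a caller would
// query (sketch):
//   EVT AmtVT = TLI.getShiftAmountTy(MVT::i32, DL); // MVT::i64 here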

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA,
                          EVT::getVectorVT(Context, SVT.getVectorElementType(),
                                           SVT.getVectorNumElements() / 2));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts == 1)
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = (unsigned)NextPowerOf2(NumElts);
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    //  <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
                          EVT::getVectorVT(Context, EltVT, NumElts / 2));

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (1) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64bit element on 32bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (1) {
    // Round up to the next power of 2.
    NumElts = (unsigned)NextPowerOf2(NumElts);

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type.  Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
  return LegalizeKind(TypeSplitVector, NVT);
}
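
// Tracing the <3 x i8> example from the comments above on a hypothetical
// target where only <4 x i32> is legal, repeated queries would step:
//   <3 x i8>  -> (TypeWidenVector,    <4 x i8>)
//   <4 x i8>  -> (TypePromoteInteger, <4 x i32>)
//   <4 x i32> -> (TypeLegal,          <4 x i32>)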

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
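
// For instance, on a hypothetical target whose widest legal vector type is
// v4i32, breaking down v8i32 halves the element count once and yields:
//   IntermediateVT = RegisterVT = v4i32, NumIntermediates = 2, result 2.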

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineInstr *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  // MI changes inside this loop as we grow operands.
  for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
    MachineOperand &MO = MI->getOperand(OperIdx);
    if (!MO.isFI())
      continue;

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();
    MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

    // Copy operands before the frame-index.
    for (unsigned i = 0; i < OperIdx; ++i)
      MIB.add(MI->getOperand(i));
    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering.  This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MI->getOperand(OperIdx));
      MIB.addImm(0);
    }
    // Copy the operands after the frame index.
    for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
      MIB.add(MI->getOperand(i));

    // Inherit previous memory operands.
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    auto Flags = MachineMemOperand::MOLoad;
    if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
      Flags |= MachineMemOperand::MOStore;
      Flags |= MachineMemOperand::MOVolatile;
    }
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags,
        MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
    MIB->addMemOperand(MF, MMO);

    // Replace the instruction and update the operand index.
    MBB->insert(MachineBasicBlock::iterator(MI), MIB);
    OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
    MI->eraseFromParent();
    MI = MIB;
  }
  return MI;
}
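
// After this rewrite, a STATEPOINT spill-slot reference <fi#3> of size 8
// appears in the operand list in the form stackmaps.cpp expects:
//   ..., IndirectMemRefOp, 8, <fi#3>, 0, ...
// whereas a patchpoint or direct alloca reference becomes:
//   ..., DirectMemRefOp, <fi#3>, 0, ...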

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}
1388 /// computeRegisterProperties - Once all of the register classes are added,
1389 /// this allows us to compute derived properties we expose.
1390 void TargetLoweringBase::computeRegisterProperties(
1391 const TargetRegisterInfo *TRI) {
1392 static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
1393 "Too many value types for ValueTypeActions to hold!");
1395 // Everything defaults to needing one register.
1396 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1397 NumRegistersForVT[i] = 1;
1398 RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1400 // ...except isVoid, which doesn't need any registers.
1401 NumRegistersForVT[MVT::isVoid] = 0;
1403 // Find the largest integer register class.
1404 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1405 for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1406 assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1408 // Every integer value type larger than this largest register takes twice as
1409 // many registers to represent as the previous ValueType.
1410 for (unsigned ExpandedReg = LargestIntReg + 1;
1411 ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1412 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1413 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1414 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1415 ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1419 // Inspect all of the ValueType's smaller than the largest integer
1420 // register to see which ones need promotion.
1421 unsigned LegalIntReg = LargestIntReg;
1422 for (unsigned IntReg = LargestIntReg - 1;
1423 IntReg >= (unsigned)MVT::i1; --IntReg) {
1424 MVT IVT = (MVT::SimpleValueType)IntReg;
1425 if (isTypeLegal(IVT)) {
1426 LegalIntReg = IntReg;
1428 RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1429 (const MVT::SimpleValueType)LegalIntReg;
1430 ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128 support,
  // expand it to i128 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::f16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    bool IsLegalWiderType = false;
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_INTEGER_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
            SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;
    }

    case TypeWidenVector: {
      // Try to widen the vector.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        if (SVT.getVectorElementType() == EltVT
            && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;
    }

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          // Set type action according to the number of elements.
          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
                                                        : TypeSplitVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386, the representative class
  // for i8, i16, and i32 would be GR32; on x86_64 it's GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
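///
/// Illustrative example (for a hypothetical target where v4f32 is the widest
/// legal vector type): for VT = v8f32 this would return 2 and set
/// IntermediateVT = RegisterVT = v4f32 and NumIntermediates = 2, i.e. the
/// value is carried in two v4f32 registers.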
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                    EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  unsigned NumElts = VT.getVectorNumElements();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;
  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
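///
/// Illustrative example (hypothetical, not taken from a specific target): an
/// i8 return value marked 'signext' on a target whose smallest legal integer
/// register type is i32 is first promoted to i32 (the MinVT logic below) and
/// then emitted as a single sign-extended ISD::OutputArg.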
void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlignment(Ty);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace,
                                            unsigned Alignment,
                                            bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlignment(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}

BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
  return BranchProbability(MinPercentageForPredictableBranch, 100);
}

//===----------------------------------------------------------------------===//
// TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
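  // Illustrative example (for a hypothetical target where v4i32 is legal):
  // a v16i32 value is split twice, v16i32 -> 2 x v8i32 -> 4 x v4i32, so the
  // loop below returns {Cost = 4, MVT = v4i32}.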
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}

Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                                              bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                     StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
// Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
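///
/// Illustrative example against this default implementation: an AddrMode with
/// HasBaseReg = true, BaseOffs = 16, Scale = 0 ("r+i") is accepted, while the
/// same mode with Scale = 1 ("r+r+i") is rejected below.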
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS) const {
  // The default implementation of this implements a conservative RISCy, r+r
  // and r+i addr mode.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r
    return false;
  }

  return true;
}

//===----------------------------------------------------------------------===//
// Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    return M.getOrInsertGlobal("__guard_local", PtrTy);
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext()));
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getGlobalVariable("__stack_chk_guard", true);
}

Value *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

//===----------------------------------------------------------------------===//
// Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function *F = MF.getFunction();
  return F->getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
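///
/// Illustrative mapping implied by the code below: (IsSqrt=false, VT=v4f32)
/// -> "vec-divf", (IsSqrt=true, VT=f64) -> "sqrtd", and (IsSqrt=true,
/// VT=v2f64) -> "vec-sqrtd".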
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}

/// Return the character position and value (a single numeric character) of a
/// customized refinement operation in the input string if it exists. Return
/// false if there is no customized refinement step count.
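///
/// Illustrative example: for In = "vec-divf:2" this sets Position = 8 (the
/// index of ':'), sets Value = 2, and returns true; for In = "vec-divf" it
/// returns false.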
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (RefStepChar >= '0' && RefStepChar <= '9') {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
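///
/// Illustrative examples: Override = "all" enables every operation; a
/// comma-separated list such as "!divf,vec-sqrtf:3" disables the scalar f32
/// division estimate while enabling the vector f32 square-root estimate
/// (the ":3" refinement-step suffix is parsed off before matching).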
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  SplitString(Override, OverrideVector, ",");
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
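///
/// Illustrative example: with Override = "sqrtf:3" a query for the scalar f32
/// square-root steps returns 3, while a query for the f32 division steps
/// finds no match and returns Unspecified.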
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  SplitString(Override, OverrideVector, ",");
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}