//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"

#include "AMDGPUCallLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

#include "AMDGPUGenCallingConv.inc"
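
// Note: the AMDGPUGenCallingConv.inc include above supplies the
// tablegen-generated calling-convention assignment functions referenced
// below (CC_AMDGPU, CC_AMDGPU_Func, RetCC_SI_Shader, RetCC_AMDGPU_Func).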

static cl::opt<bool> AMDGPUBypassSlowDiv(
  "amdgpu-bypass-slow-div",
  cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
  cl::init(true));

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
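
// The two helpers below compute the effective unsigned/signed bit width of a
// value from known-bits and sign-bit analysis. Combines elsewhere in this file
// compare the result against 24 when forming the 24-bit multiply operations.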

unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  KnownBits Known = DAG.computeKnownBits(Op);
  return VT.getSizeInBits() - Known.countMinLeadingZeros();
}

unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v4i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4i64, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v4f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f64, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v8i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8i64, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::v8f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f64, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::v16i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16i64, MVT::v32i32);

  setOperationAction(ISD::LOAD, MVT::v16f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f64, MVT::v32i32);
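
  // With the promotions above, e.g. a load of f64 is selected as a v2i32 load
  // whose result is bitcast back to f64, so only the integer load patterns
  // need to exist in tablegen.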

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v3i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v3i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v3i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v3f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v5f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::v32f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v4i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4i64, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v4f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f64, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v8i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8i64, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::v8f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f64, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::v16i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16i64, MVT::v32i32);

  setOperationAction(ISD::STORE, MVT::v16f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f64, MVT::v32i32);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
  setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
  setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v4i64, MVT::v4i32, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::v16f64, MVT::v16f32, Expand);
  setTruncStoreAction(MVT::v16f64, MVT::v16f16, Expand);
  setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v16i64, MVT::v16i1, Expand);
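
  // An expanded truncating store becomes an explicit truncate of the value
  // followed by an ordinary store of the narrow type, e.g. an i64-to-i16
  // truncating store is an i64-to-i16 truncate plus an i16 store.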

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FLOG, MVT::f32, Custom);
  setOperationAction(ISD::FLOG10, MVT::f32, Custom);
  setOperationAction(ISD::FEXP, MVT::f32, Custom);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v3i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v3f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v5i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v5f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f64, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i64, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i64, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f64, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i64, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f64, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i64, Custom);
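
  // The custom CONCAT_VECTORS / EXTRACT_SUBVECTOR lowerings simply build or
  // extract the individual elements; see LowerCONCAT_VECTORS and
  // LowerEXTRACT_SUBVECTOR below.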

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);

    // AMDGPU uses ADDC/SUBC/ADDE/SUBE
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  // The hardware supports 32-bit FSHR, but not FSHL.
  setOperationAction(ISD::FSHR, MVT::i32, Legal);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
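
  // The custom lowering handles the 64-bit bit-count operations in terms of
  // the 32-bit instructions on the two halves; see LowerCTLZ_CTTZ.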

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FCANONICALIZE, VT, Expand);
  }

  // This causes an unrolled select operation to be used rather than expansion
  // with bit operations. This is in general better, but the alternative using
  // BFI instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For now,
  // we don't have a way of knowing during instruction selection if a condition
  // will be uniform and we always use vector compares. Assume we are using
  // vector compares until that is fixed.
  setHasMultipleConditionRegisters(true);

  setMinCmpXchgSizeInBits(32);
  setSupportsUnalignedAtomics(false);

  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
  // about these during lowering.
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;

  // The expansion for 64-bit division is enormous.
  if (AMDGPUBypassSlowDiv)
    addBypassSlowDiv(64, 32);
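
  // With this bypass, a 64-bit division whose operands turn out at run time
  // to fit in 32 bits is dispatched to the much cheaper 32-bit path.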

  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FABS);
  setTargetDAGCombine(ISD::AssertZext);
  setTargetDAGCombine(ISD::AssertSext);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
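
  // The combines registered above are dispatched from this target's
  // PerformDAGCombine implementation later in this file.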
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

LLVM_READNONE
static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FSIN:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FCANONICALIZE:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMED3:
    return true;
  default:
    return false;
  }
}

/// Returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
LLVM_READONLY
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}

// Most FP instructions support source modifiers, but this could be refined
// slightly.
LLVM_READONLY
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case ISD::CopyToReg:
  case ISD::SELECT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::INLINEASM:
  case ISD::INLINEASM_BR:
  case AMDGPUISD::DIV_SCALE:
  case ISD::INTRINSIC_W_CHAIN:

  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
    case Intrinsic::amdgcn_interp_p1:
    case Intrinsic::amdgcn_interp_p2:
    case Intrinsic::amdgcn_interp_mov:
    case Intrinsic::amdgcn_interp_p1_f16:
    case Intrinsic::amdgcn_interp_p2_f16:
      return false;
    default:
      return true;
    }
  }
  default:
    return true;
  }
}

bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // it is truly free to use a source modifier in all cases. If there are
  // multiple users, but each one would require a VOP3 encoding, there will be
  // a code size increase. Try to avoid increasing code size unless we know it
  // will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit number of uses to check?
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}
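
// Round illegal scalar return types up to a whole number of 32-bit registers;
// e.g. an i48 return value is extended and returned as an i64 (a pair of
// 32-bit registers).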
EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                              ISD::NodeType ExtendKind) const {
  assert(!VT.isVector() && "only scalar expected");

  // Round to the next multiple of 32-bits.
  unsigned Size = VT.getSizeInBits();
  if (Size <= 32)
    return MVT::i32;
  return EVT::getIntegerVT(Context, 32 * ((Size + 31) / 32));
}

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType ExtTy,
                                                 EVT NewVT) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
    return false;

  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load or a smaller multi-dword load,
  // this is always better.
  if (NewSize >= 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  MemSDNode *MN = cast<MemSDNode>(N);
  unsigned AS = MN->getAddressSpace();
  // Do not shrink an aligned scalar load to sub-dword.
  // Scalar engine cannot do sub-dword loads.
  if (OldSize >= 32 && NewSize < 32 && MN->getAlignment() >= 4 &&
      (AS == AMDGPUAS::CONSTANT_ADDRESS ||
       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
       (isa<LoadSDNode>(N) &&
        AS == AMDGPUAS::GLOBAL_ADDRESS && MN->isInvariant())) &&
      AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
    return false;

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // matter.

  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
                                                   const SelectionDAG &DAG,
                                                   const MachineMemOperand &MMO) const {

  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
    return false;

  bool Fast = false;
  return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                        CastTy, MMO, &Fast) &&
         Fast;
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    default:
      break;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
    break;
  }
  case ISD::LOAD:
    if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
        AMDGPUAS::CONSTANT_ADDRESS_32BIT)
      return true;
    break;
  }
  return false;
}

SDValue AMDGPUTargetLowering::getNegatedExpression(
    SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize,
    NegatibleCost &Cost, unsigned Depth) const {

  switch (Op.getOpcode()) {
  case ISD::FMA:
  case ISD::FMAD: {
    // Negating a fma is not free if it has users without source mods.
    if (!allUsesHaveSourceMods(Op.getNode()))
      return SDValue();
    break;
  }
  default:
    break;
  }

  return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
                                              ForCodeSize, Cost, Depth);
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16) ||
         (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // local register.

  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.

  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return CC_AMDGPU_Func;
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  default:
    report_fatal_error("Unsupported calling convention for call");
  }
}

CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    llvm_unreachable("kernels should not be handled here");
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return RetCC_SI_Shader;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original types sizes
/// from the LLVM IR Function and fixup the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments()
///
/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
/// the argument before it was split. From this, we deduce that the memory type
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
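///
/// Illustrative flow (not exhaustive): an i64 argument with 32-bit registers
/// gives NumRegs == 2 and RegisterVT == i32; none of the vector or extended
/// cases below match, so MemoryBits == 64 / 2 == 32 and two parts are
/// recorded with LocVT == i32.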
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
    CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const {
  const MachineFunction &MF = State.getMachineFunction();
  const Function &Fn = MF.getFunction();
  LLVMContext &Ctx = Fn.getParent()->getContext();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
  const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
  CallingConv::ID CC = Fn.getCallingConv();

  Align MaxAlign = Align(1);
  uint64_t ExplicitArgOffset = 0;
  const DataLayout &DL = Fn.getParent()->getDataLayout();

  unsigned InIndex = 0;

  for (const Argument &Arg : Fn.args()) {
    Type *BaseArgTy = Arg.getType();
    Align Alignment = DL.getABITypeAlign(BaseArgTy);
    MaxAlign = std::max(Alignment, MaxAlign);
    unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;

    // We're basically throwing away everything passed into us and starting over
    // to get accurate in-memory offsets. The "PartOffset" is completely useless
    // to us as computed in Ins.
    //
    // We also need to figure out what type legalization is trying to do to get
    // the correct memory offsets.

    SmallVector<EVT, 16> ValueVTs;
    SmallVector<uint64_t, 16> Offsets;
    ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);

    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      uint64_t BasePartOffset = Offsets[Value];

      EVT ArgVT = ValueVTs[Value];
      EVT MemVT = ArgVT;
      MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
      unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);

      if (NumRegs == 1) {
        // This argument is not split, so the IR type is the memory type.
        if (ArgVT.isExtended()) {
          // We have an extended type, like i24, so we should just use the
          // register type.
          MemVT = RegisterVT;
        } else {
          MemVT = ArgVT;
        }
      } else if (ArgVT.isVector() && RegisterVT.isVector() &&
                 ArgVT.getScalarType() == RegisterVT.getScalarType()) {
        assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
        // We have a vector value which has been split into a vector with
        // the same scalar type, but fewer elements. This should handle
        // all the floating-point vector types.
        MemVT = RegisterVT;
      } else if (ArgVT.isVector() &&
                 ArgVT.getVectorNumElements() == NumRegs) {
        // This arg has been split so that each element is stored in a separate
        // register.
        MemVT = ArgVT.getScalarType();
      } else if (ArgVT.isExtended()) {
        // We have an extended type, like i65.
        MemVT = RegisterVT;
      } else {
        unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
        assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
        if (RegisterVT.isInteger()) {
          MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
        } else if (RegisterVT.isVector()) {
          assert(!RegisterVT.getScalarType().isFloatingPoint());
          unsigned NumElements = RegisterVT.getVectorNumElements();
          assert(MemoryBits % NumElements == 0);
          // This vector type has been split into another vector type with
          // a different elements size.
          EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                           MemoryBits / NumElements);
          MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
        } else {
          llvm_unreachable("cannot deduce memory type.");
        }
      }

      // Convert one element vectors to scalar.
      if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
        MemVT = MemVT.getScalarType();

      // Round up vec3/vec5 argument.
      if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
        assert(MemVT.getVectorNumElements() == 3 ||
               MemVT.getVectorNumElements() == 5);
        MemVT = MemVT.getPow2VectorType(State.getContext());
      } else if (!MemVT.isSimple() && !MemVT.isVector()) {
        MemVT = MemVT.getRoundIntegerType(State.getContext());
      }

      unsigned PartOffset = 0;
      for (unsigned i = 0; i != NumRegs; ++i) {
        State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
                                               BasePartOffset + PartOffset,
                                               MemVT.getSimpleVT(),
                                               CCValAssign::Full));
        PartOffset += MemVT.getStoreSize();
      }
    }
  }
}

SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  // "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}

SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
                                                  SelectionDAG &DAG,
                                                  MachineFrameInfo &MFI,
                                                  int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument that overlaps the clobbered
  // frame index.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U) {
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }
      }
    }
  }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
                                                 SmallVectorImpl<SDValue> &InVals,
                                                 StringRef Reason) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, Reason + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::FLOG:
    return LowerFLOG(Op, DAG, numbers::ln2f);
  case ISD::FLOG10:
    return LowerFLOG(Op, DAG, numbers::ln2f / numbers::ln10f);
  case ISD::FEXP:
    return lowerFEXP(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ_CTTZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}

bool AMDGPUTargetLowering::hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
      G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
    if (!MFI->isEntryFunction()) {
      SDLoc DL(Op);
      const Function &Fn = DAG.getMachineFunction().getFunction();
      DiagnosticInfoUnsupported BadLDSDecl(
          Fn, "local memory global used by non-kernel function",
          DL.getDebugLoc(), DS_Warning);
      DAG.getContext()->diagnose(BadLDSDecl);

      // We currently don't have a way to correctly allocate LDS objects that
      // aren't directly associated with a kernel. We do force inlining of
      // functions that use local objects. However, if these dead functions are
      // not eliminated, we don't want a compile time error. Just emit a warning
      // and a trap, since there should be no callable path here.
      SDValue Trap = DAG.getNode(ISD::TRAP, DL, MVT::Other, DAG.getEntryNode());
      SDValue OutputChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                                        Trap, DAG.getRoot());
      DAG.setRoot(OutputChain);
      return DAG.getUNDEF(Op.getValueType());
    }

    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (!hasDefinedInitializer(GV)) {
      unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV));
      return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
    }
  }

  const Function &Fn = DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  EVT VT = Op.getValueType();
  if (VT == MVT::v4i16 || VT == MVT::v4f16) {
    SDLoc SL(Op);
    SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
    SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));

    SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
    return DAG.getNode(ISD::BITCAST, SL, VT, BV);
  }

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

/// Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

// Split a vector type into two parts. The first part is a power of two vector.
// The second part is whatever is left over, and is a scalar if it would
// otherwise be a 1-vector.
std::pair<EVT, EVT>
AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
  EVT LoVT, HiVT;
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
  LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
  HiVT = NumElts - LoNumElts == 1
             ? EltVT
             : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
  return std::make_pair(LoVT, HiVT);
}
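
// For example, getSplitDestVTs(v3f32) yields {v2f32, f32} and
// getSplitDestVTs(v5i32) yields {v4i32, i32}.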

// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
// a scalar.
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
                                  const EVT &LoVT, const EVT &HiVT,
                                  SelectionDAG &DAG) const {
  assert(LoVT.getVectorNumElements() +
                 (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
                           DAG.getVectorIdxConstant(0, DL));
  SDValue Hi = DAG.getNode(
      HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
      HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
  return std::make_pair(Lo, Hi);
}
1507 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1508 SelectionDAG &DAG) const {
1509 LoadSDNode *Load = cast<LoadSDNode>(Op);
1510 EVT VT = Op.getValueType();
1514 // If this is a 2 element vector, we really want to scalarize and not create
1515 // weird 1 element vectors.
1516 if (VT.getVectorNumElements() == 2) {
1518 std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
1519 return DAG.getMergeValues(Ops, SL);
1522 SDValue BasePtr = Load->getBasePtr();
1523 EVT MemVT = Load->getMemoryVT();
1525 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1528 EVT LoMemVT, HiMemVT;
1531 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1532 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1533 std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
1535 unsigned Size = LoMemVT.getStoreSize();
1536 unsigned BaseAlign = Load->getAlignment();
1537 unsigned HiAlign = MinAlign(BaseAlign, Size);
1539 SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1540 Load->getChain(), BasePtr, SrcValue, LoMemVT,
1541 BaseAlign, Load->getMemOperand()->getFlags());
1542 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
1543 SDValue HiLoad =
1544 DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1545 HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1546 HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1548 SDValue Join;
1549 if (LoVT == HiVT) {
1550 // This is the case that the vector is power of two so was evenly split.
1551 Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
1552 } else {
1553 Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
1554 DAG.getVectorIdxConstant(0, SL));
1555 Join = DAG.getNode(
1556 HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL,
1557 VT, Join, HiLoad,
1558 DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL));
1559 }
1561 SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1562 LoLoad.getValue(1), HiLoad.getValue(1))};
1564 return DAG.getMergeValues(Ops, SL);
1565 }
1567 // Widen a vector load from vec3 to vec4.
1568 SDValue AMDGPUTargetLowering::WidenVectorLoad(SDValue Op,
1569 SelectionDAG &DAG) const {
1570 LoadSDNode *Load = cast<LoadSDNode>(Op);
1571 EVT VT = Op.getValueType();
1572 assert(VT.getVectorNumElements() == 3);
1573 SDValue BasePtr = Load->getBasePtr();
1574 EVT MemVT = Load->getMemoryVT();
1575 SDLoc SL(Op);
1576 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1577 unsigned BaseAlign = Load->getAlignment();
1579 EVT WideVT =
1580 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
1581 EVT WideMemVT =
1582 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
1583 SDValue WideLoad = DAG.getExtLoad(
1584 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1585 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1586 return DAG.getMergeValues(
1587 {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
1588 DAG.getVectorIdxConstant(0, SL)),
1589 WideLoad.getValue(1)},
1590 SL);
1591 }
1593 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1594 SelectionDAG &DAG) const {
1595 StoreSDNode *Store = cast<StoreSDNode>(Op);
1596 SDValue Val = Store->getValue();
1597 EVT VT = Val.getValueType();
1599 // If this is a 2 element vector, we really want to scalarize and not create
1600 // weird 1 element vectors.
1601 if (VT.getVectorNumElements() == 2)
1602 return scalarizeVectorStore(Store, DAG);
1604 EVT MemVT = Store->getMemoryVT();
1605 SDValue Chain = Store->getChain();
1606 SDValue BasePtr = Store->getBasePtr();
1607 SDLoc SL(Op);
1609 EVT LoVT, HiVT;
1610 EVT LoMemVT, HiMemVT;
1611 SDValue Lo, Hi;
1613 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1614 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1615 std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);
1617 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1619 const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1620 unsigned BaseAlign = Store->getAlignment();
1621 unsigned Size = LoMemVT.getStoreSize();
1622 unsigned HiAlign = MinAlign(BaseAlign, Size);
1624 SDValue LoStore =
1625 DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1626 Store->getMemOperand()->getFlags());
1627 SDValue HiStore =
1628 DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1629 HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1631 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1632 }
1634 // This is a shortcut for integer division because we have fast i32<->f32
1635 // conversions, and fast f32 reciprocal instructions. The fractional part of a
1636 // float is enough to accurately represent up to a 24-bit signed integer.
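// Illustrative note: f32 has a 24-bit significand (23 stored bits plus the
// implicit leading one), so any integer of magnitude below 2^24 converts
// exactly. E.g. for 7 / 3: fq = 7.0 * rcp(3.0) ~= 2.333, trunc(fq) = 2.0,
// and the remainder is recomputed as 7 - 2 * 3 = 1. The jq term below
// corrects the quotient by +/-1 when the truncated estimate is off by one.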
1637 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1638 bool Sign) const {
1639 SDLoc DL(Op);
1640 EVT VT = Op.getValueType();
1641 SDValue LHS = Op.getOperand(0);
1642 SDValue RHS = Op.getOperand(1);
1643 MVT IntVT = MVT::i32;
1644 MVT FltVT = MVT::f32;
1646 unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1647 if (LHSSignBits < 9)
1648 return SDValue();
1650 unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1651 if (RHSSignBits < 9)
1652 return SDValue();
1654 unsigned BitSize = VT.getSizeInBits();
1655 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1656 unsigned DivBits = BitSize - SignBits;
1657 if (DivBits > 24)
1658 return SDValue();
1660 ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1661 ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1663 SDValue jq = DAG.getConstant(1, DL, IntVT);
1665 if (Sign) {
1666 // char|short jq = ia ^ ib;
1667 jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1669 // jq = jq >> (bitsize - 2)
1670 jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1671 DAG.getConstant(BitSize - 2, DL, VT));
1673 // jq = jq | 0x1
1674 jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1675 }
1677 // int ia = (int)LHS;
1678 SDValue ia = LHS;
1680 // int ib = (int)RHS;
1681 SDValue ib = RHS;
1683 // float fa = (float)ia;
1684 SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1686 // float fb = (float)ib;
1687 SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1689 SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1690 fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1693 fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1695 // float fqneg = -fq;
1696 SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1698 MachineFunction &MF = DAG.getMachineFunction();
1699 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
1701 // float fr = mad(fqneg, fb, fa);
1702 unsigned OpCode = !Subtarget->hasMadMacF32Insts() ?
1703 (unsigned)ISD::FMA :
1704 !MFI->getMode().allFP32Denormals() ?
1705 (unsigned)ISD::FMAD :
1706 (unsigned)AMDGPUISD::FMAD_FTZ;
1707 SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1709 // int iq = (int)fq;
1710 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1713 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1716 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1718 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1720 // int cv = fr >= fb;
1721 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1723 // jq = (cv ? jq : 0);
1724 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1727 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1729 // Rem needs compensation, it's easier to recompute it
1730 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1731 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1733 // Truncate to number of bits this divide really is.
1734 if (Sign) {
1735 SDValue InRegSize
1736 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1737 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1738 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1739 } else {
1740 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1741 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1742 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1743 }
1745 return DAG.getMergeValues({ Div, Rem }, DL);
1746 }
1748 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1749 SelectionDAG &DAG,
1750 SmallVectorImpl<SDValue> &Results) const {
1751 SDLoc DL(Op);
1752 EVT VT = Op.getValueType();
1754 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1756 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1758 SDValue One = DAG.getConstant(1, DL, HalfVT);
1759 SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1762 SDValue LHS = Op.getOperand(0);
1763 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1764 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);
1766 SDValue RHS = Op.getOperand(1);
1767 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1768 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);
1770 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1771 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1773 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1774 LHS_Lo, RHS_Lo);
1776 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1777 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1779 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1780 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1781 return;
1782 }
1784 if (isTypeLegal(MVT::i64)) {
1785 MachineFunction &MF = DAG.getMachineFunction();
1786 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1788 // Compute denominator reciprocal.
1789 unsigned FMAD = !Subtarget->hasMadMacF32Insts() ?
1790 (unsigned)ISD::FMA :
1791 !MFI->getMode().allFP32Denormals() ?
1792 (unsigned)ISD::FMAD :
1793 (unsigned)AMDGPUISD::FMAD_FTZ;
1795 SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1796 SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1797 SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1798 DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1799 Cvt_Lo);
1800 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1801 SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1802 DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1803 SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1804 DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1805 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1806 SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1807 DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1808 Mul1);
1809 SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1810 SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1811 SDValue Rcp64 = DAG.getBitcast(VT,
1812 DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
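// Note on the magic constants above (assuming standard IEEE-754 f32 bit
// patterns): 0x4f800000 is 2^32, so Mad1 ~= (float)RHS; 0x5f7ffffc is just
// below 2^64, so Mul1 ~= 2^64 / RHS without overflowing; 0x2f800000 is
// 2^-32, splitting that reciprocal into a high word (Trunc -> Rcp_Hi) and,
// via 0xcf800000 = -2^32, a low word (Mad2 -> Rcp_Lo). Rcp64 is therefore a
// 64-bit fixed-point estimate of 2^64 / RHS, refined by the steps below.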
1814 SDValue Zero64 = DAG.getConstant(0, DL, VT);
1815 SDValue One64 = DAG.getConstant(1, DL, VT);
1816 SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1817 SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1819 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1820 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1821 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1822 SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1823 Zero);
1824 SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1825 One);
1827 SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
1828 Mulhi1_Lo, Zero1);
1829 SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
1830 Mulhi1_Hi, Add1_Lo.getValue(1));
1831 SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi);
1832 SDValue Add1 = DAG.getBitcast(VT,
1833 DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1835 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1836 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1837 SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1838 Zero);
1839 SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1840 One);
1842 SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
1843 Mulhi2_Lo, Zero1);
1844 SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc,
1845 Mulhi2_Hi, Add1_Lo.getValue(1));
1846 SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC,
1847 Zero, Add2_Lo.getValue(1));
1848 SDValue Add2 = DAG.getBitcast(VT,
1849 DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
1850 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
1852 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
1854 SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
1855 SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
1856 SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
1857 Mul3_Lo, Zero1);
1858 SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
1859 Mul3_Hi, Sub1_Lo.getValue(1));
1860 SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
1861 SDValue Sub1 = DAG.getBitcast(VT,
1862 DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1864 SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1865 SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1866 ISD::SETUGE);
1867 SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1868 ISD::SETUGE);
1869 SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1871 // TODO: Here and below portions of the code can be enclosed into if/endif.
1872 // Currently control flow is unconditional and we have 4 selects after
1873 // potential endif to substitute PHIs.
1876 SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1877 RHS_Lo, Zero1);
1878 SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1879 RHS_Hi, Sub1_Lo.getValue(1));
1880 SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1881 Zero, Sub2_Lo.getValue(1));
1882 SDValue Sub2 = DAG.getBitcast(VT,
1883 DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1885 SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1887 SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1888 ISD::SETUGE);
1889 SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1890 ISD::SETUGE);
1891 SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1894 SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1896 SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1897 RHS_Lo, Zero1);
1898 SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1899 RHS_Hi, Sub2_Lo.getValue(1));
1900 SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1901 Zero, Sub3_Lo.getValue(1));
1902 SDValue Sub3 = DAG.getBitcast(VT,
1903 DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1908 SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1909 SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1911 SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1912 SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1914 Results.push_back(Div);
1915 Results.push_back(Rem);
1917 return;
1918 }
1921 // Get Speculative values
1922 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1923 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1925 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1926 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1927 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1929 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1930 SDValue DIV_Lo = Zero;
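// What follows is a classic restoring long division over the low 32 bits:
// each iteration shifts the partial remainder left by one, brings in the
// next bit of LHS_Lo, and, if REM >= RHS, sets the corresponding quotient
// bit and subtracts RHS back out.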
1932 const unsigned halfBitWidth = HalfVT.getSizeInBits();
1934 for (unsigned i = 0; i < halfBitWidth; ++i) {
1935 const unsigned bitPos = halfBitWidth - i - 1;
1936 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1937 // Get value of high bit
1938 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1939 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1940 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1943 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1945 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1947 SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1948 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1950 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1953 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1954 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1955 }
1957 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1958 DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1959 Results.push_back(DIV);
1960 Results.push_back(REM);
1961 }
1963 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1964 SelectionDAG &DAG) const {
1965 SDLoc DL(Op);
1966 EVT VT = Op.getValueType();
1968 if (VT == MVT::i64) {
1969 SmallVector<SDValue, 2> Results;
1970 LowerUDIVREM64(Op, DAG, Results);
1971 return DAG.getMergeValues(Results, DL);
1972 }
1974 if (VT == MVT::i32) {
1975 if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1976 return Res;
1977 }
1979 SDValue X = Op.getOperand(0);
1980 SDValue Y = Op.getOperand(1);
1982 // See AMDGPUCodeGenPrepare::expandDivRem32 for a description of the
1983 // algorithm used here.
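// Sketch of that algorithm: Z starts as an estimate of 2^32 / Y, is refined
// with one Newton-Raphson style step (Z += mulhu(Z, -Y * Z)), and the
// resulting quotient estimate Q = mulhu(X, Z) can be short by a small
// amount, which the two conditional refinement steps below repair.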
1985 // Initial estimate of inv(y).
1986 SDValue Z = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Y);
1988 // One round of UNR.
1989 SDValue NegY = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Y);
1990 SDValue NegYZ = DAG.getNode(ISD::MUL, DL, VT, NegY, Z);
1991 Z = DAG.getNode(ISD::ADD, DL, VT, Z,
1992 DAG.getNode(ISD::MULHU, DL, VT, Z, NegYZ));
1994 // Quotient/remainder estimate.
1995 SDValue Q = DAG.getNode(ISD::MULHU, DL, VT, X, Z);
1996 SDValue R =
1997 DAG.getNode(ISD::SUB, DL, VT, X, DAG.getNode(ISD::MUL, DL, VT, Q, Y));
1999 // First quotient/remainder refinement.
2000 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2001 SDValue One = DAG.getConstant(1, DL, VT);
2002 SDValue Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2003 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2004 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2005 R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2006 DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2008 // Second quotient/remainder refinement.
2009 Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2010 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2011 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2012 R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2013 DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2015 return DAG.getMergeValues({Q, R}, DL);
2016 }
2018 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
2019 SelectionDAG &DAG) const {
2020 SDLoc DL(Op);
2021 EVT VT = Op.getValueType();
2023 SDValue LHS = Op.getOperand(0);
2024 SDValue RHS = Op.getOperand(1);
2026 SDValue Zero = DAG.getConstant(0, DL, VT);
2027 SDValue NegOne = DAG.getConstant(-1, DL, VT);
2029 if (VT == MVT::i32) {
2030 if (SDValue Res = LowerDIVREM24(Op, DAG, true))
2031 return Res;
2032 }
2034 if (VT == MVT::i64 &&
2035 DAG.ComputeNumSignBits(LHS) > 32 &&
2036 DAG.ComputeNumSignBits(RHS) > 32) {
2037 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
2040 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
2041 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
2042 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
2043 LHS_Lo, RHS_Lo);
2044 SDValue Res[2] = {
2045 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
2046 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
2047 };
2048 return DAG.getMergeValues(Res, DL);
2049 }
2051 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
2052 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
2053 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
2054 SDValue RSign = LHSign; // Remainder sign is the same as LHS
2056 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
2057 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
2059 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
2060 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
2062 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
2063 SDValue Rem = Div.getValue(1);
2065 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2066 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2068 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2069 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2071 SDValue Res[2] = {
2072 Div,
2073 Rem
2074 };
2075 return DAG.getMergeValues(Res, DL);
2076 }
2078 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
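// E.g. frem(5.5, 2.0): fdiv gives 2.75, ftrunc gives 2.0, so the result is
// 5.5 - 2.0 * 2.0 = 1.5; like C fmod, the result takes the sign of x.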
2079 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2080 SDLoc SL(Op);
2081 EVT VT = Op.getValueType();
2082 SDValue X = Op.getOperand(0);
2083 SDValue Y = Op.getOperand(1);
2085 // TODO: Should this propagate fast-math-flags?
2087 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
2088 SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
2089 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
2091 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
2092 }
2094 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2095 SDLoc SL(Op);
2096 SDValue Src = Op.getOperand(0);
2098 // result = trunc(src)
2099 // if (src > 0.0 && src != result)
2100 // result += 1.0
2102 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2104 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2105 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2107 EVT SetCCVT =
2108 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2110 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2111 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2112 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2114 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2115 // TODO: Should this propagate fast-math-flags?
2116 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2117 }
2119 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2120 SelectionDAG &DAG) {
2121 const unsigned FractBits = 52;
2122 const unsigned ExpBits = 11;
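// An IEEE-754 f64 is laid out as 1 sign bit, 11 exponent bits (bits 62:52)
// and 52 fraction bits. Given only the high 32-bit word, the exponent field
// therefore starts at bit 52 - 32 = 20, which is what the BFE below
// extracts before subtracting the bias of 1023.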
2124 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2125 Hi,
2126 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2127 DAG.getConstant(ExpBits, SL, MVT::i32));
2128 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2129 DAG.getConstant(1023, SL, MVT::i32));
2131 return Exp;
2132 }
2134 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2135 SDLoc SL(Op);
2136 SDValue Src = Op.getOperand(0);
2138 assert(Op.getValueType() == MVT::f64);
2140 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2141 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2143 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2145 // Extract the upper half, since this is where we will find the sign and
2146 // exponent.
2147 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
2149 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2151 const unsigned FractBits = 52;
2153 // Extract the sign bit.
2154 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2155 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2157 // Extend back to 64-bits.
2158 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2159 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2161 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2162 const SDValue FractMask
2163 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2165 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2166 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2167 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2169 EVT SetCCVT =
2170 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2172 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2174 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2175 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2177 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2178 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2180 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2181 }
2183 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2184 SDLoc SL(Op);
2185 SDValue Src = Op.getOperand(0);
2187 assert(Op.getValueType() == MVT::f64);
2189 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2190 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2191 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
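// This uses the usual 2^52 trick: for |x| < 2^52, adding and then
// subtracting copysign(2^52, x) leaves x rounded to an integer, because an
// f64 has 52 fraction bits and the addition discards everything below the
// units place in the current (round-to-nearest-even) mode.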
2193 // TODO: Should this propagate fast-math-flags?
2195 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2196 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2198 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2200 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2201 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2203 EVT SetCCVT =
2204 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2205 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2207 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2208 }
2210 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2211 // FNEARBYINT and FRINT are the same, except in their handling of FP
2212 // exceptions. Those aren't really meaningful for us, and OpenCL only has
2213 // rint, so just treat them as equivalent.
2214 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2215 }
2217 // XXX - May require not supporting f32 denormals?
2219 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2220 // compare and vselect end up producing worse code than scalarizing the whole
2221 // operation.
2222 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2223 SDLoc SL(Op);
2224 SDValue X = Op.getOperand(0);
2225 EVT VT = Op.getValueType();
2227 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
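// round(x) is implemented as T + copysign(|x - T| >= 0.5 ? 1.0 : 0.0, x),
// i.e. round-half-away-from-zero; e.g. x = -2.5 gives T = -2.0 and a final
// result of -3.0.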
2229 // TODO: Should this propagate fast-math-flags?
2231 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2233 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2235 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2236 const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2237 const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2239 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2242 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2244 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2246 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2248 return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2249 }
2251 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2252 SDLoc SL(Op);
2253 SDValue Src = Op.getOperand(0);
2255 // result = trunc(src);
2256 // if (src < 0.0 && src != result)
2257 // result += -1.0
2259 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2261 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2262 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2264 EVT SetCCVT =
2265 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2267 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2268 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2269 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2271 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2272 // TODO: Should this propagate fast-math-flags?
2273 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2274 }
2276 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2277 double Log2BaseInverted) const {
2278 EVT VT = Op.getValueType();
2279 SDLoc SL(Op);
2281 SDValue Operand = Op.getOperand(0);
2282 SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2283 SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2285 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2286 }
2288 // exp2(M_LOG2E_F * f);
2289 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2290 EVT VT = Op.getValueType();
2291 SDLoc SL(Op);
2292 SDValue Src = Op.getOperand(0);
2294 const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT);
2295 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
2296 return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
2297 }
2299 static bool isCtlzOpc(unsigned Opc) {
2300 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2301 }
2303 static bool isCttzOpc(unsigned Opc) {
2304 return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2305 }
2307 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2308 SDLoc SL(Op);
2309 SDValue Src = Op.getOperand(0);
2310 bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
2311 Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
2313 unsigned ISDOpc, NewOpc;
2314 if (isCtlzOpc(Op.getOpcode())) {
2315 ISDOpc = ISD::CTLZ_ZERO_UNDEF;
2316 NewOpc = AMDGPUISD::FFBH_U32;
2317 } else if (isCttzOpc(Op.getOpcode())) {
2318 ISDOpc = ISD::CTTZ_ZERO_UNDEF;
2319 NewOpc = AMDGPUISD::FFBL_B32;
2320 } else
2321 llvm_unreachable("Unexpected OPCode!!!");
2324 if (ZeroUndef && Src.getValueType() == MVT::i32)
2325 return DAG.getNode(NewOpc, SL, MVT::i32, Src);
2327 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2329 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2330 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2332 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
2333 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
2335 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2336 *DAG.getContext(), MVT::i32);
2338 SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
2339 SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);
2341 SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
2342 SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);
2344 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
2345 SDValue Add, NewOpr;
2346 if (isCtlzOpc(Op.getOpcode())) {
2347 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
2348 // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2349 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
2350 } else {
2351 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
2352 // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
2353 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
2354 }
2356 if (!ZeroUndef) {
2357 // Test if the full 64-bit input is zero.
2359 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
2360 // which we probably don't want.
2361 SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
2362 SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
2363 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);
2365 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
2366 // with the same cycles, otherwise it is slower.
2367 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
2368 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
2370 const SDValue Bits32 = DAG.getConstant(64, SL, MVT::i32);
2372 // The instruction returns -1 for 0 input, but the defined intrinsic
2373 // behavior is to return the number of bits.
2374 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
2375 SrcIsZero, Bits32, NewOpr);
2376 }
2378 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2379 }
2381 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2382 bool Signed) const {
2383 // Unsigned
2384 // cul2f(ulong u)
2385 //{
2386 // uint lz = clz(u);
2387 // uint e = (u != 0) ? 127U + 63U - lz : 0;
2388 // u = (u << lz) & 0x7fffffffffffffffUL;
2389 // ulong t = u & 0xffffffffffUL;
2390 // uint v = (e << 23) | (uint)(u >> 40);
2391 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
2392 // return as_float(v + r);
2393 //}
2395 // Signed
2396 // cl2f(long l)
2397 // long s = l >> 63;
2398 // float r = cul2f((l + s) ^ s);
2399 // return s ? -r : r;
2400 //}
2402 SDLoc SL(Op);
2403 SDValue Src = Op.getOperand(0);
2404 SDValue L = Src;
2405 SDValue S;
2407 if (Signed) {
2408 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
2409 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
2411 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
2412 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
2413 }
2415 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2416 *DAG.getContext(), MVT::f32);
2419 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
2420 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
2421 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
2422 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
2424 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
2425 SDValue E = DAG.getSelect(SL, MVT::i32,
2426 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
2427 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
2428 ZeroI32);
2430 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
2431 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
2432 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
2434 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
2435 DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
2437 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
2438 U, DAG.getConstant(40, SL, MVT::i64));
2440 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
2441 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
2442 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl));
2444 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
2445 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
2446 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
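// RCmp/TCmp implement round-to-nearest-even on the 40 truncated bits:
// C = 2^39 is the halfway point, so the rounding increment r is 1 above it
// and 0 below it, while an exact tie lets the low result bit (v & 1)
// decide, rounding to even.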
2448 SDValue One = DAG.getConstant(1, SL, MVT::i32);
2450 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
2452 SDValue R = DAG.getSelect(SL, MVT::i32,
2453 RCmp,
2454 One,
2455 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
2456 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
2457 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
2459 if (!Signed)
2460 return R;
2462 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
2463 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
2464 }
2466 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2467 bool Signed) const {
2468 SDLoc SL(Op);
2469 SDValue Src = Op.getOperand(0);
2471 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2473 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2474 DAG.getConstant(0, SL, MVT::i32));
2475 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2476 DAG.getConstant(1, SL, MVT::i32));
2478 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2479 SL, MVT::f64, Hi);
2481 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2483 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2484 DAG.getConstant(32, SL, MVT::i32));
2485 // TODO: Should this propagate fast-math-flags?
2486 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2487 }
2489 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2490 SelectionDAG &DAG) const {
2491 // TODO: Factor out code common with LowerSINT_TO_FP.
2492 EVT DestVT = Op.getValueType();
2493 SDValue Src = Op.getOperand(0);
2494 EVT SrcVT = Src.getValueType();
2496 if (SrcVT == MVT::i16) {
2497 if (DestVT == MVT::f16)
2498 return Op;
2500 SDLoc DL(Op);
2501 // Promote src to i32
2502 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src);
2503 return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext);
2504 }
2506 assert(SrcVT == MVT::i64 && "operation should be legal");
2508 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2509 SDLoc DL(Op);
2511 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2512 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2513 SDValue FPRound =
2514 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2516 return FPRound;
2517 }
2519 if (DestVT == MVT::f32)
2520 return LowerINT_TO_FP32(Op, DAG, false);
2522 assert(DestVT == MVT::f64);
2523 return LowerINT_TO_FP64(Op, DAG, false);
2524 }
2526 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2527 SelectionDAG &DAG) const {
2528 EVT DestVT = Op.getValueType();
2530 SDValue Src = Op.getOperand(0);
2531 EVT SrcVT = Src.getValueType();
2533 if (SrcVT == MVT::i16) {
2534 if (DestVT == MVT::f16)
2535 return Op;
2537 SDLoc DL(Op);
2538 // Promote src to i32
2539 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src);
2540 return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext);
2541 }
2543 assert(SrcVT == MVT::i64 && "operation should be legal");
2545 // TODO: Factor out code common with LowerUINT_TO_FP.
2547 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2548 SDLoc DL(Op);
2549 SDValue Src = Op.getOperand(0);
2551 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2552 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2553 SDValue FPRound =
2554 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2556 return FPRound;
2557 }
2559 if (DestVT == MVT::f32)
2560 return LowerINT_TO_FP32(Op, DAG, true);
2562 assert(DestVT == MVT::f64);
2563 return LowerINT_TO_FP64(Op, DAG, true);
2564 }
2566 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
2567 bool Signed) const {
2568 SDLoc SL(Op);
2570 SDValue Src = Op.getOperand(0);
2572 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2574 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
2575 MVT::f64);
2576 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
2577 MVT::f64);
2578 // TODO: Should this propagate fast-math-flags?
2579 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
2581 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
2584 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
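// In other words (with K0 = 2^-32 and K1 = -2^32): FloorMul is the high
// 32-bit half floor(x / 2^32), and Fma = x - FloorMul * 2^32 is the
// remaining low half, each of which then fits a single 32-bit conversion.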
2586 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
2587 MVT::i32, FloorMul);
2588 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2590 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
2592 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
2593 }
2595 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2596 SDLoc DL(Op);
2597 SDValue N0 = Op.getOperand(0);
2599 // Convert to target node to get known bits
2600 if (N0.getValueType() == MVT::f32)
2601 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2603 if (getTargetMachine().Options.UnsafeFPMath) {
2604 // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2605 return SDValue();
2606 }
2608 assert(N0.getSimpleValueType() == MVT::f64);
2610 // f64 -> f16 conversion using round-to-nearest-even rounding mode.
2611 const unsigned ExpMask = 0x7ff;
2612 const unsigned ExpBiasf64 = 1023;
2613 const unsigned ExpBiasf16 = 15;
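// Overview of the sequence below: it pulls the exponent and mantissa out of
// the high word, rebiases the exponent from 1023 to 15, builds either a
// normal (N) or a denormal (D) encoding depending on whether E < 1, rounds
// to nearest-even using the low bits kept in VLow3, and finally clamps
// overflow to infinity (0x7c00) before or-ing the sign bit back in.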
2614 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2615 SDValue One = DAG.getConstant(1, DL, MVT::i32);
2616 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2617 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2618 DAG.getConstant(32, DL, MVT::i64));
2619 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2620 U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2621 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2622 DAG.getConstant(20, DL, MVT::i64));
2623 E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2624 DAG.getConstant(ExpMask, DL, MVT::i32));
2625 // Subtract the fp64 exponent bias (1023) to get the real exponent and
2626 // add the f16 bias (15) to get the biased exponent for the f16 format.
2627 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2628 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2630 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2631 DAG.getConstant(8, DL, MVT::i32));
2632 M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2633 DAG.getConstant(0xffe, DL, MVT::i32));
2635 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2636 DAG.getConstant(0x1ff, DL, MVT::i32));
2637 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2639 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2640 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2642 // (M != 0 ? 0x0200 : 0) | 0x7c00;
2643 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2644 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2645 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2647 // N = M | (E << 12);
2648 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2649 DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2650 DAG.getConstant(12, DL, MVT::i32)));
2652 // B = clamp(1-E, 0, 13);
2653 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2654 One, E);
2655 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2656 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2657 DAG.getConstant(13, DL, MVT::i32));
2659 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2660 DAG.getConstant(0x1000, DL, MVT::i32));
2662 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2663 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2664 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2665 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2667 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2668 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2669 DAG.getConstant(0x7, DL, MVT::i32));
2670 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2671 DAG.getConstant(2, DL, MVT::i32));
2672 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2673 One, Zero, ISD::SETEQ);
2674 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2675 One, Zero, ISD::SETGT);
2676 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2677 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2679 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2680 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2681 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2682 I, V, ISD::SETGT);
2684 // Extract the sign bit.
2685 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2686 DAG.getConstant(16, DL, MVT::i32));
2687 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2688 DAG.getConstant(0x8000, DL, MVT::i32));
2690 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2691 return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2692 }
2694 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
2695 SelectionDAG &DAG) const {
2696 SDValue Src = Op.getOperand(0);
2698 // TODO: Factor out code common with LowerFP_TO_UINT.
2700 EVT SrcVT = Src.getValueType();
2701 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2702 SDLoc DL(Op);
2704 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2705 SDValue FpToInt32 =
2706 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2708 return FpToInt32;
2709 }
2711 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2712 return LowerFP64_TO_INT(Op, DAG, true);
2714 return SDValue();
2715 }
2717 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
2718 SelectionDAG &DAG) const {
2719 SDValue Src = Op.getOperand(0);
2721 // TODO: Factor out code common with LowerFP_TO_SINT.
2723 EVT SrcVT = Src.getValueType();
2724 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2725 SDLoc DL(Op);
2727 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2728 SDValue FpToInt32 =
2729 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2731 return FpToInt32;
2732 }
2734 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2735 return LowerFP64_TO_INT(Op, DAG, false);
2737 return SDValue();
2738 }
2740 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2741 SelectionDAG &DAG) const {
2742 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2743 MVT VT = Op.getSimpleValueType();
2744 MVT ScalarVT = VT.getScalarType();
2746 assert(VT.isVector());
2748 SDValue Src = Op.getOperand(0);
2749 SDLoc DL(Op);
2751 // TODO: Don't scalarize on Evergreen?
2752 unsigned NElts = VT.getVectorNumElements();
2753 SmallVector<SDValue, 8> Args;
2754 DAG.ExtractVectorElements(Src, Args, 0, NElts);
2756 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2757 for (unsigned I = 0; I < NElts; ++I)
2758 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2760 return DAG.getBuildVector(VT, DL, Args);
2761 }
2763 //===----------------------------------------------------------------------===//
2764 // Custom DAG optimizations
2765 //===----------------------------------------------------------------------===//
2767 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2768 return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
2769 }
2771 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2772 EVT VT = Op.getValueType();
2773 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2774 // as unsigned 24-bit values.
2775 AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
2776 }
2778 static SDValue simplifyI24(SDNode *Node24,
2779 TargetLowering::DAGCombinerInfo &DCI) {
2780 SelectionDAG &DAG = DCI.DAG;
2781 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2782 bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
2784 SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
2785 SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
2786 unsigned NewOpcode = Node24->getOpcode();
2787 if (IsIntrin) {
2788 unsigned IID = cast<ConstantSDNode>(Node24->getOperand(0))->getZExtValue();
2789 NewOpcode = IID == Intrinsic::amdgcn_mul_i24 ?
2790 AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
2791 }
2793 APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
2795 // First try to simplify using SimplifyMultipleUseDemandedBits which allows
2796 // the operands to have other uses, but will only perform simplifications that
2797 // involve bypassing some nodes for this user.
2798 SDValue DemandedLHS = TLI.SimplifyMultipleUseDemandedBits(LHS, Demanded, DAG);
2799 SDValue DemandedRHS = TLI.SimplifyMultipleUseDemandedBits(RHS, Demanded, DAG);
2800 if (DemandedLHS || DemandedRHS)
2801 return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
2802 DemandedLHS ? DemandedLHS : LHS,
2803 DemandedRHS ? DemandedRHS : RHS);
2805 // Now try SimplifyDemandedBits which can simplify the nodes used by our
2806 // operands if this node is the only user.
2807 if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
2808 return SDValue(Node24, 0);
2809 if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
2810 return SDValue(Node24, 0);
2812 return SDValue();
2813 }
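// Constant folding helper for bitfield extract: e.g. with Src0 = 0x12345678,
// Offset = 8 and Width = 8 the extracted field is 0x56; for the signed
// variant, the arithmetic shift below sign-extends the extracted field.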
2815 template <typename IntTy>
2816 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2817 uint32_t Width, const SDLoc &DL) {
2818 if (Width + Offset < 32) {
2819 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2820 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2821 return DAG.getConstant(Result, DL, MVT::i32);
2822 }
2824 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2825 }
2827 static bool hasVolatileUser(SDNode *Val) {
2828 for (SDNode *U : Val->uses()) {
2829 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2830 if (M->isVolatile())
2831 return true;
2832 }
2833 }
2835 return false;
2836 }
2838 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2839 // i32 vectors are the canonical memory type.
2840 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2841 return false;
2843 if (!VT.isByteSized())
2844 return false;
2846 unsigned Size = VT.getStoreSize();
2848 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2849 return false;
2851 if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2852 return false;
2854 return true;
2855 }
2857 // Replace load of an illegal type with a load of a bitcast to a friendlier
2858 // type.
2859 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2860 DAGCombinerInfo &DCI) const {
2861 if (!DCI.isBeforeLegalize())
2862 return SDValue();
2864 LoadSDNode *LN = cast<LoadSDNode>(N);
2865 if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2866 return SDValue();
2868 SDLoc SL(N);
2869 SelectionDAG &DAG = DCI.DAG;
2870 EVT VT = LN->getMemoryVT();
2872 unsigned Size = VT.getStoreSize();
2873 Align Alignment = LN->getAlign();
2874 if (Alignment < Size && isTypeLegal(VT)) {
2875 bool IsFast;
2876 unsigned AS = LN->getAddressSpace();
2878 // Expand unaligned loads earlier than legalization. Due to visitation order
2879 // problems during legalization, the emitted instructions to pack and unpack
2880 // the bytes again are not eliminated in the case of an unaligned copy.
2881 if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
2882 LN->getMemOperand()->getFlags(),
2883 &IsFast)) {
2884 SDValue Ops[2];
2886 if (VT.isVector())
2887 std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LN, DAG);
2888 else
2889 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2891 return DAG.getMergeValues(Ops, SDLoc(N));
2892 }
2894 if (!IsFast)
2895 return SDValue();
2896 }
2898 if (!shouldCombineMemoryType(VT))
2899 return SDValue();
2901 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2903 SDValue NewLoad
2904 = DAG.getLoad(NewVT, SL, LN->getChain(),
2905 LN->getBasePtr(), LN->getMemOperand());
2907 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2908 DCI.CombineTo(N, BC, NewLoad.getValue(1));
2909 return SDValue(N, 0);
2910 }
2912 // Replace store of an illegal type with a store of a bitcast to a friendlier
2913 // type.
2914 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2915 DAGCombinerInfo &DCI) const {
2916 if (!DCI.isBeforeLegalize())
2917 return SDValue();
2919 StoreSDNode *SN = cast<StoreSDNode>(N);
2920 if (!SN->isSimple() || !ISD::isNormalStore(SN))
2921 return SDValue();
2923 EVT VT = SN->getMemoryVT();
2924 unsigned Size = VT.getStoreSize();
2926 SDLoc SL(N);
2927 SelectionDAG &DAG = DCI.DAG;
2928 Align Alignment = SN->getAlign();
2929 if (Alignment < Size && isTypeLegal(VT)) {
2930 bool IsFast;
2931 unsigned AS = SN->getAddressSpace();
2933 // Expand unaligned stores earlier than legalization. Due to visitation
2934 // order problems during legalization, the emitted instructions to pack and
2935 // unpack the bytes again are not eliminated in the case of an unaligned
2936 // copy.
2937 if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
2938 SN->getMemOperand()->getFlags(),
2939 &IsFast)) {
2940 if (VT.isVector())
2941 return scalarizeVectorStore(SN, DAG);
2943 return expandUnalignedStore(SN, DAG);
2944 }
2946 if (!IsFast)
2947 return SDValue();
2948 }
2950 if (!shouldCombineMemoryType(VT))
2951 return SDValue();
2953 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2954 SDValue Val = SN->getValue();
2956 //DCI.AddToWorklist(Val.getNode());
2958 bool OtherUses = !Val.hasOneUse();
2959 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2960 if (OtherUses) {
2961 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2962 DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2963 }
2965 return DAG.getStore(SN->getChain(), SL, CastVal,
2966 SN->getBasePtr(), SN->getMemOperand());
2967 }
2969 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
2970 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
2971 // issues.
2972 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
2973 DAGCombinerInfo &DCI) const {
2974 SelectionDAG &DAG = DCI.DAG;
2975 SDValue N0 = N->getOperand(0);
2977 // (vt2 (assertzext (truncate vt0:x), vt1)) ->
2978 // (vt2 (truncate (assertzext vt0:x, vt1)))
2979 if (N0.getOpcode() == ISD::TRUNCATE) {
2980 SDValue N1 = N->getOperand(1);
2981 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
2983 SDLoc SL(N);
2984 SDValue Src = N0.getOperand(0);
2985 EVT SrcVT = Src.getValueType();
2986 if (SrcVT.bitsGE(ExtVT)) {
2987 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
2988 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
2989 }
2990 }
2992 return SDValue();
2993 }
2995 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
2996 SDNode *N, DAGCombinerInfo &DCI) const {
2997 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2998 switch (IID) {
2999 case Intrinsic::amdgcn_mul_i24:
3000 case Intrinsic::amdgcn_mul_u24:
3001 return simplifyI24(N, DCI);
3002 case Intrinsic::amdgcn_fract:
3003 case Intrinsic::amdgcn_rsq:
3004 case Intrinsic::amdgcn_rcp_legacy:
3005 case Intrinsic::amdgcn_rsq_legacy:
3006 case Intrinsic::amdgcn_rsq_clamp:
3007 case Intrinsic::amdgcn_ldexp: {
3008 // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
3009 SDValue Src = N->getOperand(1);
3010 return Src.isUndef() ? Src : SDValue();
3011 }
3012 default:
3013 return SDValue();
3014 }
3015 }
3017 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3018 /// binary operation \p Opc to it with the corresponding constant operands.
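/// For example, (or i64:x, 0x8000000000000000) only needs the OR on the
/// high half; the low half's (or lo, 0) then folds away entirely.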
3019 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3020 DAGCombinerInfo &DCI, const SDLoc &SL,
3021 unsigned Opc, SDValue LHS,
3022 uint32_t ValLo, uint32_t ValHi) const {
3023 SelectionDAG &DAG = DCI.DAG;
3024 SDValue Lo, Hi;
3025 std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3027 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3028 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3030 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3031 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3033 // Re-visit the ands. It's possible we eliminated one of them and it could
3034 // simplify the vector.
3035 DCI.AddToWorklist(Lo.getNode());
3036 DCI.AddToWorklist(Hi.getNode());
3038 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3039 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3040 }
3042 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3043 DAGCombinerInfo &DCI) const {
3044 EVT VT = N->getValueType(0);
3046 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3047 if (!RHS)
3048 return SDValue();
3050 SDValue LHS = N->getOperand(0);
3051 unsigned RHSVal = RHS->getZExtValue();
3052 if (!RHSVal)
3053 return LHS;
3055 SDLoc SL(N);
3056 SelectionDAG &DAG = DCI.DAG;
3058 switch (LHS->getOpcode()) {
3059 default:
3060 break;
3061 case ISD::ZERO_EXTEND:
3062 case ISD::SIGN_EXTEND:
3063 case ISD::ANY_EXTEND: {
3064 SDValue X = LHS->getOperand(0);
3066 if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3067 isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3068 // Prefer build_vector as the canonical form if packed types are legal.
3069 // (shl ([asz]ext i16:x), 16 -> build_vector 0, x
3070 SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3071 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3072 return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3073 }
3075 // shl (ext x) => zext (shl x), if shift does not overflow int
3076 if (VT != MVT::i64)
3077 break;
3078 KnownBits Known = DAG.computeKnownBits(X);
3079 unsigned LZ = Known.countMinLeadingZeros();
3080 if (LZ < RHSVal)
3081 break;
3082 EVT XVT = X.getValueType();
3083 SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3084 return DAG.getZExtOrTrunc(Shl, SL, VT);
3085 }
3086 }
3088 if (VT != MVT::i64)
3089 return SDValue();
3091 // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
3093 // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3094 // common case, splitting this into a move and a 32-bit shift is faster and
3095 // the same code size.
3096 if (RHSVal < 32)
3097 return SDValue();
3099 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3101 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3102 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3104 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3106 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3107 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3108 }
3110 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3111 DAGCombinerInfo &DCI) const {
3112 if (N->getValueType(0) != MVT::i64)
3113 return SDValue();
3115 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3116 if (!RHS)
3117 return SDValue();
3119 SelectionDAG &DAG = DCI.DAG;
3120 SDLoc SL(N);
3121 unsigned RHSVal = RHS->getZExtValue();
3123 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
3124 if (RHSVal == 32) {
3125 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3126 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3127 DAG.getConstant(31, SL, MVT::i32));
3129 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3130 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3131 }
3133 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3134 if (RHSVal == 63) {
3135 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3136 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3137 DAG.getConstant(31, SL, MVT::i32));
3138 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3139 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3140 }
3142 return SDValue();
3143 }
3145 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3146 DAGCombinerInfo &DCI) const {
3147 auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3148 if (!RHS)
3149 return SDValue();
3151 EVT VT = N->getValueType(0);
3152 SDValue LHS = N->getOperand(0);
3153 unsigned ShiftAmt = RHS->getZExtValue();
3154 SelectionDAG &DAG = DCI.DAG;
3155 SDLoc SL(N);
3157 // fold (srl (and x, c1 << c2), c2) -> (and (srl(x, c2), c1)
3158 // this improves the ability to match BFE patterns in isel.
3159 if (LHS.getOpcode() == ISD::AND) {
3160 if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3161 if (Mask->getAPIntValue().isShiftedMask() &&
3162 Mask->getAPIntValue().countTrailingZeros() == ShiftAmt) {
3163 return DAG.getNode(
3164 ISD::AND, SL, VT,
3165 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3166 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3167 }
3168 }
3169 }
3171 if (ShiftAmt < 32)
3172 return SDValue();
3177 // srl i64:x, C for C >= 32
3178 // =>
3179 // build_pair (srl hi_32(x), C - 32), 0
3180 SDValue One = DAG.getConstant(1, SL, MVT::i32);
3181 SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3183 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, LHS);
3184 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecOp, One);
3186 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3187 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3189 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3191 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3192 }
3194 SDValue AMDGPUTargetLowering::performTruncateCombine(
3195 SDNode *N, DAGCombinerInfo &DCI) const {
3196 SDLoc SL(N);
3197 SelectionDAG &DAG = DCI.DAG;
3198 EVT VT = N->getValueType(0);
3199 SDValue Src = N->getOperand(0);
3201 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3202 if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3203 SDValue Vec = Src.getOperand(0);
3204 if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3205 SDValue Elt0 = Vec.getOperand(0);
3206 EVT EltVT = Elt0.getValueType();
3207 if (VT.getSizeInBits() <= EltVT.getSizeInBits()) {
3208 if (EltVT.isFloatingPoint()) {
3209 Elt0 = DAG.getNode(ISD::BITCAST, SL,
3210 EltVT.changeTypeToInteger(), Elt0);
3211 }
3213 return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3214 }
3215 }
3218 // Equivalent of above for accessing the high element of a vector as an
3219 // integer operation.
3220 // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
3221 if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3222 if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3223 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3224 SDValue BV = stripBitcast(Src.getOperand(0));
3225 if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3226 BV.getValueType().getVectorNumElements() == 2) {
3227 SDValue SrcElt = BV.getOperand(1);
3228 EVT SrcEltVT = SrcElt.getValueType();
3229 if (SrcEltVT.isFloatingPoint()) {
3230 SrcElt = DAG.getNode(ISD::BITCAST, SL,
3231 SrcEltVT.changeTypeToInteger(), SrcElt);
3232 }
3234 return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3235 }
3236 }
3237 }
3238 }
3240 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3242 // i16 (trunc (srl i64:x, K)), K <= 16 ->
3243 // i16 (trunc (srl (i32 (trunc x), K)))
3244 if (VT.getScalarSizeInBits() < 32) {
3245 EVT SrcVT = Src.getValueType();
3246 if (SrcVT.getScalarSizeInBits() > 32 &&
3247 (Src.getOpcode() == ISD::SRL ||
3248 Src.getOpcode() == ISD::SRA ||
3249 Src.getOpcode() == ISD::SHL)) {
3250 SDValue Amt = Src.getOperand(1);
3251 KnownBits Known = DAG.computeKnownBits(Amt);
3252 unsigned Size = VT.getScalarSizeInBits();
3253 if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3254 (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
3255 EVT MidVT = VT.isVector() ?
3256 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3257 VT.getVectorNumElements()) : MVT::i32;
3259 EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3260 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3261 Src.getOperand(0));
3262 DCI.AddToWorklist(Trunc.getNode());
3264 if (Amt.getValueType() != NewShiftVT) {
3265 Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3266 DCI.AddToWorklist(Amt.getNode());
3269 SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3270 Trunc, Amt);
3271 return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3272 }
3273 }
3274 }
3276 return SDValue();
3277 }
3279 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3280 // instructions. If we only match on the legalized i64 mul expansion,
3281 // SimplifyDemandedBits will be unable to remove them because there will be
3282 // multiple uses due to the separate mul + mulh[su].
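// For example, an i64 mul of 24-bit inputs becomes a single
// (build_pair (mul_lohi_u24 x, y)) rather than separate (mul_u24 x, y) and
// (mulhi_u24 x, y) users of x and y.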
3283 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3284 SDValue N0, SDValue N1, unsigned Size, bool Signed) {
3285 if (Size <= 32) {
3286 unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3287 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3290 // Because we want to eliminate extension instructions before the
3291 // operation, we need to create a single user here (i.e. not the separate
3292 // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
3294 unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
3296 SDValue Mul = DAG.getNode(MulOpc, SL,
3297 DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
3299 return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
3300 Mul.getValue(0), Mul.getValue(1));
3303 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3304 DAGCombinerInfo &DCI) const {
3305 EVT VT = N->getValueType(0);
3307 unsigned Size = VT.getSizeInBits();
3308 if (VT.isVector() || Size > 64)
3309 return SDValue();
3311 // There are i16 integer mul/mad.
3312 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3313 return SDValue();
3315 SelectionDAG &DAG = DCI.DAG;
3316 SDLoc DL(N);
3318 SDValue N0 = N->getOperand(0);
3319 SDValue N1 = N->getOperand(1);
3321 // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3322 // in the source into any_extends if the result of the mul is truncated. Since
3323 // we can assume the high bits are whatever we want, use the underlying value
3324 // to avoid the unknown high bits from interfering.
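// For example, in (trunc (mul (any_extend i16:x), (any_extend i16:y))) the
// extended high bits are already unspecified, so the 24-bit checks below can
// look at the narrow x and y directly.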
3325 if (N0.getOpcode() == ISD::ANY_EXTEND)
3326 N0 = N0.getOperand(0);
3328 if (N1.getOpcode() == ISD::ANY_EXTEND)
3329 N1 = N1.getOperand(0);
3331 SDValue Mul;
3333 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3334 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3335 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3336 Mul = getMul24(DAG, DL, N0, N1, Size, false);
3337 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3338 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3339 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3340 Mul = getMul24(DAG, DL, N0, N1, Size, true);
3345 // We need to use sext even for MUL_U24, because MUL_U24 is used
3346 // for signed multiply of 8 and 16-bit types.
3347 return DAG.getSExtOrTrunc(Mul, DL, VT);
3350 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
3351 DAGCombinerInfo &DCI) const {
3352 EVT VT = N->getValueType(0);
3354 if (!Subtarget->hasMulI24() || VT.isVector())
3355 return SDValue();
3357 SelectionDAG &DAG = DCI.DAG;
3358 SDLoc DL(N);
3360 SDValue N0 = N->getOperand(0);
3361 SDValue N1 = N->getOperand(1);
3363 if (!isI24(N0, DAG) || !isI24(N1, DAG))
3364 return SDValue();
3366 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3367 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3369 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
3370 DCI.AddToWorklist(Mulhi.getNode());
3371 return DAG.getSExtOrTrunc(Mulhi, DL, VT);
3374 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
3375 DAGCombinerInfo &DCI) const {
3376 EVT VT = N->getValueType(0);
3378 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
3379 return SDValue();
3381 SelectionDAG &DAG = DCI.DAG;
3382 SDLoc DL(N);
3384 SDValue N0 = N->getOperand(0);
3385 SDValue N1 = N->getOperand(1);
3387 if (!isU24(N0, DAG) || !isU24(N1, DAG))
3388 return SDValue();
3390 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3391 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3393 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
3394 DCI.AddToWorklist(Mulhi.getNode());
3395 return DAG.getZExtOrTrunc(Mulhi, DL, VT);
3398 SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
3399 SDNode *N, DAGCombinerInfo &DCI) const {
3400 SelectionDAG &DAG = DCI.DAG;
3402 // Simplify demanded bits before splitting into multiple users.
3403 if (SDValue V = simplifyI24(N, DCI))
3404 return V;
3406 SDValue N0 = N->getOperand(0);
3407 SDValue N1 = N->getOperand(1);
3409 bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);
3411 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3412 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
3414 SDLoc SL(N);
3416 SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3417 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3418 return DAG.getMergeValues({ MulLo, MulHi }, SL);
3421 static bool isNegativeOne(SDValue Val) {
3422 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
3423 return C->isAllOnesValue();
3424 return false;
3427 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
3428 SDValue Op,
3429 const SDLoc &DL,
3430 unsigned Opc) const {
3431 EVT VT = Op.getValueType();
3432 EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
3433 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
3434 LegalVT != MVT::i16))
3435 return SDValue();
3437 if (VT != MVT::i32)
3438 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
3440 SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
3441 if (VT != MVT::i32)
3442 FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
3444 return FFBX;
3447 // The native instructions return -1 on 0 input. Optimize out a select that
3448 // produces -1 on 0.
3450 // TODO: If zero is not undef, we could also do this if the output is compared
3451 // against the bitwidth.
3453 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3454 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3455 SDValue LHS, SDValue RHS,
3456 DAGCombinerInfo &DCI) const {
3457 ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3458 if (!CmpRhs || !CmpRhs->isNullValue())
3459 return SDValue();
3461 SelectionDAG &DAG = DCI.DAG;
3462 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3463 SDValue CmpLHS = Cond.getOperand(0);
3465 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3466 // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
3467 if (CCOpcode == ISD::SETEQ &&
3468 (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3469 RHS.getOperand(0) == CmpLHS && isNegativeOne(LHS)) {
3470 unsigned Opc =
3471 isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3472 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3475 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3476 // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
3477 if (CCOpcode == ISD::SETNE &&
3478 (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
3479 LHS.getOperand(0) == CmpLHS && isNegativeOne(RHS)) {
3480 unsigned Opc =
3481 isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3483 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3489 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3490 unsigned Op,
3491 const SDLoc &SL,
3492 SDValue Cond,
3493 SDValue N1,
3494 SDValue N2) {
3495 SelectionDAG &DAG = DCI.DAG;
3496 EVT VT = N1.getValueType();
3498 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3499 N1.getOperand(0), N2.getOperand(0));
3500 DCI.AddToWorklist(NewSelect.getNode());
3501 return DAG.getNode(Op, SL, VT, NewSelect);
3504 // Pull a free FP operation out of a select so it may fold into uses.
3506 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3507 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3509 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3510 // select c, (fabs x), +k -> fabs (select c, x, k)
3511 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3512 SDValue N) {
3513 SelectionDAG &DAG = DCI.DAG;
3514 SDValue Cond = N.getOperand(0);
3515 SDValue LHS = N.getOperand(1);
3516 SDValue RHS = N.getOperand(2);
3518 EVT VT = N.getValueType();
3519 if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3520 (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3521 return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3522 SDLoc(N), Cond, LHS, RHS);
3525 bool Inv = false;
3526 if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3527 std::swap(LHS, RHS);
3528 Inv = true;
3529 }
3531 // TODO: Support vector constants.
3532 ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3533 if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
3534 SDLoc SL(N);
3535 // If one side is an fneg/fabs and the other is a constant, we can push the
3536 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3537 SDValue NewLHS = LHS.getOperand(0);
3538 SDValue NewRHS = RHS;
3540 // Careful: if the neg can be folded up, don't try to pull it back down.
3541 bool ShouldFoldNeg = true;
3543 if (NewLHS.hasOneUse()) {
3544 unsigned Opc = NewLHS.getOpcode();
3545 if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3546 ShouldFoldNeg = false;
3547 if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3548 ShouldFoldNeg = false;
3551 if (ShouldFoldNeg) {
3552 if (LHS.getOpcode() == ISD::FNEG)
3553 NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3554 else if (CRHS->isNegative())
3555 return SDValue();
3557 if (Inv)
3558 std::swap(NewLHS, NewRHS);
3560 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3561 Cond, NewLHS, NewRHS);
3562 DCI.AddToWorklist(NewSelect.getNode());
3563 return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3571 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3572 DAGCombinerInfo &DCI) const {
3573 if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3574 return Folded;
3576 SDValue Cond = N->getOperand(0);
3577 if (Cond.getOpcode() != ISD::SETCC)
3578 return SDValue();
3580 EVT VT = N->getValueType(0);
3581 SDValue LHS = Cond.getOperand(0);
3582 SDValue RHS = Cond.getOperand(1);
3583 SDValue CC = Cond.getOperand(2);
3585 SDValue True = N->getOperand(1);
3586 SDValue False = N->getOperand(2);
3588 if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3589 SelectionDAG &DAG = DCI.DAG;
3590 if (DAG.isConstantValueOfAnyType(True) &&
3591 !DAG.isConstantValueOfAnyType(False)) {
3592 // Swap cmp + select pair to move constant to false input.
3593 // This will allow using VOPC cndmasks more often.
3594 // select (setcc x, y), k, x -> select (setccinv x, y), x, k
3596 SDLoc SL(N);
3597 ISD::CondCode NewCC =
3598 getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
3600 SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3601 return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3604 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3605 SDValue MinMax
3606 = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3607 // Revisit this node so we can catch min3/max3/med3 patterns.
3608 //DCI.AddToWorklist(MinMax.getNode());
3609 return MinMax;
3613 // There's no reason to not do this if the condition has other uses.
3614 return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
3617 static bool isInv2Pi(const APFloat &APF) {
3618 static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
3619 static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
3620 static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
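// These are the f16/f32/f64 bit patterns of 0.15915494... = 1.0 / (2.0 * pi),
// the value given an inline immediate on subtargets with
// hasInv2PiInlineImm().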
3622 return APF.bitwiseIsEqual(KF16) ||
3623 APF.bitwiseIsEqual(KF32) ||
3624 APF.bitwiseIsEqual(KF64);
3627 // 0 and 1.0 / (2.0 * pi) do not have inline immediates, so there is an
3628 // additional cost to negate them.
3629 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
3630 if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
3631 if (C->isZero() && !C->isNegative())
3632 return true;
3634 if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
3635 return true;
3638 return false;
3641 static unsigned inverseMinMax(unsigned Opc) {
3642 switch (Opc) {
3643 case ISD::FMAXNUM:
3644 return ISD::FMINNUM;
3645 case ISD::FMINNUM:
3646 return ISD::FMAXNUM;
3647 case ISD::FMAXNUM_IEEE:
3648 return ISD::FMINNUM_IEEE;
3649 case ISD::FMINNUM_IEEE:
3650 return ISD::FMAXNUM_IEEE;
3651 case AMDGPUISD::FMAX_LEGACY:
3652 return AMDGPUISD::FMIN_LEGACY;
3653 case AMDGPUISD::FMIN_LEGACY:
3654 return AMDGPUISD::FMAX_LEGACY;
3656 llvm_unreachable("invalid min/max opcode");
3660 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3661 DAGCombinerInfo &DCI) const {
3662 SelectionDAG &DAG = DCI.DAG;
3663 SDValue N0 = N->getOperand(0);
3664 EVT VT = N->getValueType(0);
3666 unsigned Opc = N0.getOpcode();
3668 // If the input has multiple uses and we can either fold the negate down, or
3669 // the other uses cannot, give up. This both prevents unprofitable
3670 // transformations and infinite loops: we won't repeatedly try to fold around
3671 // a negate that has no 'good' form.
3672 if (N0.hasOneUse()) {
3673 // This may be able to fold into the source, but at a code size cost. Don't
3674 // fold if the fold into the user is free.
3675 if (allUsesHaveSourceMods(N, 0))
3676 return SDValue();
3677 } else {
3678 if (fnegFoldsIntoOp(Opc) &&
3679 (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
3680 return SDValue();
3681 }
3683 SDLoc SL(N);
3684 switch (Opc) {
3685 case ISD::FADD: {
3686 if (!mayIgnoreSignedZero(N0))
3687 return SDValue();
3689 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
3690 SDValue LHS = N0.getOperand(0);
3691 SDValue RHS = N0.getOperand(1);
3693 if (LHS.getOpcode() != ISD::FNEG)
3694 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3696 LHS = LHS.getOperand(0);
3698 if (RHS.getOpcode() != ISD::FNEG)
3699 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3701 RHS = RHS.getOperand(0);
3703 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
3704 if (Res.getOpcode() != ISD::FADD)
3705 return SDValue(); // Op got folded away.
3706 if (!N0.hasOneUse())
3707 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3708 return Res;
3709 }
3710 case ISD::FMUL:
3711 case AMDGPUISD::FMUL_LEGACY: {
3712 // (fneg (fmul x, y)) -> (fmul x, (fneg y))
3713 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
3714 SDValue LHS = N0.getOperand(0);
3715 SDValue RHS = N0.getOperand(1);
3717 if (LHS.getOpcode() == ISD::FNEG)
3718 LHS = LHS.getOperand(0);
3719 else if (RHS.getOpcode() == ISD::FNEG)
3720 RHS = RHS.getOperand(0);
3722 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3724 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
3725 if (Res.getOpcode() != Opc)
3726 return SDValue(); // Op got folded away.
3727 if (!N0.hasOneUse())
3728 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3729 return Res;
3730 }
3731 case ISD::FMA:
3732 case ISD::FMAD: {
3733 if (!mayIgnoreSignedZero(N0))
3734 return SDValue();
3736 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
3737 SDValue LHS = N0.getOperand(0);
3738 SDValue MHS = N0.getOperand(1);
3739 SDValue RHS = N0.getOperand(2);
3741 if (LHS.getOpcode() == ISD::FNEG)
3742 LHS = LHS.getOperand(0);
3743 else if (MHS.getOpcode() == ISD::FNEG)
3744 MHS = MHS.getOperand(0);
3746 MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
3748 if (RHS.getOpcode() != ISD::FNEG)
3749 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3751 RHS = RHS.getOperand(0);
3753 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
3754 if (Res.getOpcode() != Opc)
3755 return SDValue(); // Op got folded away.
3756 if (!N0.hasOneUse())
3757 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3758 return Res;
3759 }
3760 case ISD::FMAXNUM:
3761 case ISD::FMINNUM:
3762 case ISD::FMAXNUM_IEEE:
3763 case ISD::FMINNUM_IEEE:
3764 case AMDGPUISD::FMAX_LEGACY:
3765 case AMDGPUISD::FMIN_LEGACY: {
3766 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
3767 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
3768 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
3769 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
3771 SDValue LHS = N0.getOperand(0);
3772 SDValue RHS = N0.getOperand(1);
3774 // 0 doesn't have a negated inline immediate.
3775 // TODO: This constant check should be generalized to other operations.
3776 if (isConstantCostlierToNegate(RHS))
3779 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3780 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3781 unsigned Opposite = inverseMinMax(Opc);
3783 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
3784 if (Res.getOpcode() != Opposite)
3785 return SDValue(); // Op got folded away.
3786 if (!N0.hasOneUse())
3787 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3788 return Res;
3789 }
3790 case AMDGPUISD::FMED3: {
3791 SDValue Ops[3];
3792 for (unsigned I = 0; I < 3; ++I)
3793 Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
3795 SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
3796 if (Res.getOpcode() != AMDGPUISD::FMED3)
3797 return SDValue(); // Op got folded away.
3798 if (!N0.hasOneUse())
3799 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3800 return Res;
3801 }
3802 case ISD::FP_EXTEND:
3803 case ISD::FTRUNC:
3804 case ISD::FRINT:
3805 case ISD::FNEARBYINT: // XXX - Should fround be handled?
3806 case ISD::FSIN:
3807 case ISD::FCANONICALIZE:
3808 case AMDGPUISD::RCP:
3809 case AMDGPUISD::RCP_LEGACY:
3810 case AMDGPUISD::RCP_IFLAG:
3811 case AMDGPUISD::SIN_HW: {
3812 SDValue CvtSrc = N0.getOperand(0);
3813 if (CvtSrc.getOpcode() == ISD::FNEG) {
3814 // (fneg (fp_extend (fneg x))) -> (fp_extend x)
3815 // (fneg (rcp (fneg x))) -> (rcp x)
3816 return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
3819 if (!N0.hasOneUse())
3820 return SDValue();
3822 // (fneg (fp_extend x)) -> (fp_extend (fneg x))
3823 // (fneg (rcp x)) -> (rcp (fneg x))
3824 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3825 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
3827 case ISD::FP_ROUND: {
3828 SDValue CvtSrc = N0.getOperand(0);
3830 if (CvtSrc.getOpcode() == ISD::FNEG) {
3831 // (fneg (fp_round (fneg x))) -> (fp_round x)
3832 return DAG.getNode(ISD::FP_ROUND, SL, VT,
3833 CvtSrc.getOperand(0), N0.getOperand(1));
3836 if (!N0.hasOneUse())
3837 return SDValue();
3839 // (fneg (fp_round x)) -> (fp_round (fneg x))
3840 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3841 return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
3843 case ISD::FP16_TO_FP: {
3844 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
3845 // f16, but legalization of f16 fneg ends up pulling it out of the source.
3846 // Put the fneg back as a legal source operation that can be matched later.
3847 SDLoc SL(N);
3849 SDValue Src = N0.getOperand(0);
3850 EVT SrcVT = Src.getValueType();
3852 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
3853 SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
3854 DAG.getConstant(0x8000, SL, SrcVT));
3855 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
3862 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
3863 DAGCombinerInfo &DCI) const {
3864 SelectionDAG &DAG = DCI.DAG;
3865 SDValue N0 = N->getOperand(0);
3867 if (!N0.hasOneUse())
3868 return SDValue();
3870 switch (N0.getOpcode()) {
3871 case ISD::FP16_TO_FP: {
3872 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
3873 SDLoc SL(N);
3874 SDValue Src = N0.getOperand(0);
3875 EVT SrcVT = Src.getValueType();
3877 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
3878 SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
3879 DAG.getConstant(0x7fff, SL, SrcVT));
3880 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
3887 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
3888 DAGCombinerInfo &DCI) const {
3889 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
3890 if (!CFP)
3891 return SDValue();
3893 // XXX - Should this flush denormals?
3894 const APFloat &Val = CFP->getValueAPF();
3895 APFloat One(Val.getSemantics(), "1.0");
3896 return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
3899 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3900 DAGCombinerInfo &DCI) const {
3901 SelectionDAG &DAG = DCI.DAG;
3902 SDLoc DL(N);
3904 switch(N->getOpcode()) {
3905 default:
3906 break;
3907 case ISD::BITCAST: {
3908 EVT DestVT = N->getValueType(0);
3910 // Push casts through vector builds. This helps avoid emitting a large
3911 // number of copies when materializing floating point vector constants.
3913 // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
3914 // vnt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
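// For example:
//   v2f32 (bitcast (v2i32 (build_vector i32:x, i32:y)))
//     -> build_vector (f32 (bitcast x)), (f32 (bitcast y))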
3915 if (DestVT.isVector()) {
3916 SDValue Src = N->getOperand(0);
3917 if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3918 EVT SrcVT = Src.getValueType();
3919 unsigned NElts = DestVT.getVectorNumElements();
3921 if (SrcVT.getVectorNumElements() == NElts) {
3922 EVT DestEltVT = DestVT.getVectorElementType();
3924 SmallVector<SDValue, 8> CastedElts;
3925 SDLoc SL(N);
3926 for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3927 SDValue Elt = Src.getOperand(I);
3928 CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
3931 return DAG.getBuildVector(DestVT, SL, CastedElts);
3936 if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
3937 break;
3939 // Fold bitcasts of constants.
3941 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
3942 // TODO: Generalize and move to DAGCombiner
3943 SDValue Src = N->getOperand(0);
3944 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
3945 if (Src.getValueType() == MVT::i64) {
3946 SDLoc SL(N);
3947 uint64_t CVal = C->getZExtValue();
3948 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3949 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3950 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3951 return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
3955 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
3956 const APInt &Val = C->getValueAPF().bitcastToAPInt();
3957 SDLoc SL(N);
3958 uint64_t CVal = Val.getZExtValue();
3959 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3960 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3961 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3963 return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
3968 case ISD::SHL: {
3969 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3970 break;
3972 return performShlCombine(N, DCI);
3974 case ISD::SRL: {
3975 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3976 break;
3978 return performSrlCombine(N, DCI);
3980 case ISD::SRA: {
3981 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3982 break;
3984 return performSraCombine(N, DCI);
3986 case ISD::TRUNCATE:
3987 return performTruncateCombine(N, DCI);
3988 case ISD::MUL:
3989 return performMulCombine(N, DCI);
3990 case ISD::MULHS:
3991 return performMulhsCombine(N, DCI);
3992 case ISD::MULHU:
3993 return performMulhuCombine(N, DCI);
3994 case AMDGPUISD::MUL_I24:
3995 case AMDGPUISD::MUL_U24:
3996 case AMDGPUISD::MULHI_I24:
3997 case AMDGPUISD::MULHI_U24: {
3998 if (SDValue V = simplifyI24(N, DCI))
3999 return V;
4002 case AMDGPUISD::MUL_LOHI_I24:
4003 case AMDGPUISD::MUL_LOHI_U24:
4004 return performMulLoHi24Combine(N, DCI);
4005 case ISD::SELECT:
4006 return performSelectCombine(N, DCI);
4007 case ISD::FNEG:
4008 return performFNegCombine(N, DCI);
4009 case ISD::FABS:
4010 return performFAbsCombine(N, DCI);
4011 case AMDGPUISD::BFE_I32:
4012 case AMDGPUISD::BFE_U32: {
4013 assert(!N->getValueType(0).isVector() &&
4014 "Vector handling of BFE not implemented");
4015 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
4016 if (!Width)
4017 break;
4019 uint32_t WidthVal = Width->getZExtValue() & 0x1f;
4020 if (WidthVal == 0)
4021 return DAG.getConstant(0, DL, MVT::i32);
4023 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
4024 if (!Offset)
4025 break;
4027 SDValue BitsFrom = N->getOperand(0);
4028 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
4030 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
4032 if (OffsetVal == 0) {
4033 // This is already sign / zero extended, so try to fold away extra BFEs.
4034 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
4036 unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
4037 if (OpSignBits >= SignBits)
4038 return BitsFrom;
4040 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
4042 // This is a sign_extend_inreg. Replace it to take advantage of existing
4043 // DAG Combines. If not eliminated, we will match back to BFE during
4046 // TODO: The sext_inreg of extended types also ends up here, although we
4047 // could handle them in a single BFE.
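// For example, (bfe_i32 x, 0, 8) becomes (sign_extend_inreg x, i8), and
// (bfe_u32 x, 0, 8) becomes the equivalent of (and x, 0xff) below.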
4048 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
4049 DAG.getValueType(SmallVT));
4052 return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
4055 if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
4056 if (Signed) {
4057 return constantFoldBFE<int32_t>(DAG,
4058 CVal->getSExtValue(),
4059 OffsetVal,
4060 WidthVal,
4061 DL);
4062 }
4064 return constantFoldBFE<uint32_t>(DAG,
4065 CVal->getZExtValue(),
4066 OffsetVal,
4067 WidthVal,
4068 DL);
4069 }
4071 if ((OffsetVal + WidthVal) >= 32 &&
4072 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
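// For example, (bfe_i32 x, 8, 24) reads bits [8, 31] and is exactly
// (sra x, 8); the SDWA check above keeps the half-word case for SDWA
// selection instead.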
4073 SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
4074 return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
4075 BitsFrom, ShiftVal);
4078 if (BitsFrom.hasOneUse()) {
4079 APInt Demanded = APInt::getBitsSet(32,
4080 OffsetVal,
4081 OffsetVal + WidthVal);
4083 KnownBits Known;
4084 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
4085 !DCI.isBeforeLegalizeOps());
4086 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4087 if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
4088 TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
4089 DCI.CommitTargetLoweringOpt(TLO);
4095 case ISD::LOAD:
4096 return performLoadCombine(N, DCI);
4097 case ISD::STORE:
4098 return performStoreCombine(N, DCI);
4099 case AMDGPUISD::RCP:
4100 case AMDGPUISD::RCP_IFLAG:
4101 return performRcpCombine(N, DCI);
4102 case ISD::AssertZext:
4103 case ISD::AssertSext:
4104 return performAssertSZExtCombine(N, DCI);
4105 case ISD::INTRINSIC_WO_CHAIN:
4106 return performIntrinsicWOChainCombine(N, DCI);
4107 }
4108 return SDValue();
4109 }
4111 //===----------------------------------------------------------------------===//
4112 // Helper functions
4113 //===----------------------------------------------------------------------===//
4115 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
4116 const TargetRegisterClass *RC,
4117 Register Reg, EVT VT,
4118 const SDLoc &SL,
4119 bool RawReg) const {
4120 MachineFunction &MF = DAG.getMachineFunction();
4121 MachineRegisterInfo &MRI = MF.getRegInfo();
4122 Register VReg;
4124 if (!MRI.isLiveIn(Reg)) {
4125 VReg = MRI.createVirtualRegister(RC);
4126 MRI.addLiveIn(Reg, VReg);
4127 } else {
4128 VReg = MRI.getLiveInVirtReg(Reg);
4129 }
4131 if (RawReg)
4132 return DAG.getRegister(VReg, VT);
4134 return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
4137 // This may be called multiple times, and nothing prevents creating multiple
4138 // objects at the same offset. See if we already defined this object.
4139 static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
4141 for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
4142 if (MFI.getObjectOffset(I) == Offset) {
4143 assert(MFI.getObjectSize(I) == Size);
4144 return I;
4145 }
4146 }
4148 return MFI.CreateFixedObject(Size, Offset, true);
4151 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
4152 EVT VT,
4153 const SDLoc &SL,
4154 int64_t Offset) const {
4155 MachineFunction &MF = DAG.getMachineFunction();
4156 MachineFrameInfo &MFI = MF.getFrameInfo();
4157 int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);
4159 auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
4160 SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
4162 return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4,
4163 MachineMemOperand::MODereferenceable |
4164 MachineMemOperand::MOInvariant);
4167 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
4168 SDValue Chain,
4169 SDValue ArgVal,
4170 const SDLoc &SL,
4171 int64_t Offset) const {
4172 MachineFunction &MF = DAG.getMachineFunction();
4173 MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
4175 SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
4176 SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4,
4177 MachineMemOperand::MODereferenceable);
4179 return Store;
4181 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
4182 const TargetRegisterClass *RC,
4183 EVT VT, const SDLoc &SL,
4184 const ArgDescriptor &Arg) const {
4185 assert(Arg && "Attempting to load missing argument");
4187 SDValue V = Arg.isRegister() ?
4188 CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
4189 loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
4191 if (!Arg.isMasked())
4192 return V;
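// For example, with a hypothetical 10-bit field at bit 10 (Mask = 0xffc00),
// Shift is 10 and the loaded value becomes (and (srl V, 10), 0x3ff).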
4194 unsigned Mask = Arg.getMask();
4195 unsigned Shift = countTrailingZeros<unsigned>(Mask);
4196 V = DAG.getNode(ISD::SRL, SL, VT, V,
4197 DAG.getShiftAmountConstant(Shift, VT, SL));
4198 return DAG.getNode(ISD::AND, SL, VT, V,
4199 DAG.getConstant(Mask >> Shift, SL, VT));
4202 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
4203 const MachineFunction &MF, const ImplicitParameter Param) const {
4204 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
4205 const AMDGPUSubtarget &ST =
4206 AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
4207 unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
4208 const Align Alignment = ST.getAlignmentForImplicitArgPtr();
4209 uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
4210 ExplicitArgOffset;
4211 switch (Param) {
4212 case GRID_DIM:
4213 return ArgOffset;
4214 case GRID_OFFSET:
4215 return ArgOffset + 4;
4216 }
4217 llvm_unreachable("unexpected implicit parameter type");
4220 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
4222 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
4223 switch ((AMDGPUISD::NodeType)Opcode) {
4224 case AMDGPUISD::FIRST_NUMBER: break;
4226 NODE_NAME_CASE(UMUL);
4227 NODE_NAME_CASE(BRANCH_COND);
4231 NODE_NAME_CASE(ELSE)
4232 NODE_NAME_CASE(LOOP)
4233 NODE_NAME_CASE(CALL)
4234 NODE_NAME_CASE(TC_RETURN)
4235 NODE_NAME_CASE(TRAP)
4236 NODE_NAME_CASE(RET_FLAG)
4237 NODE_NAME_CASE(RETURN_TO_EPILOG)
4238 NODE_NAME_CASE(ENDPGM)
4239 NODE_NAME_CASE(DWORDADDR)
4240 NODE_NAME_CASE(FRACT)
4241 NODE_NAME_CASE(SETCC)
4242 NODE_NAME_CASE(SETREG)
4243 NODE_NAME_CASE(DENORM_MODE)
4244 NODE_NAME_CASE(FMA_W_CHAIN)
4245 NODE_NAME_CASE(FMUL_W_CHAIN)
4246 NODE_NAME_CASE(CLAMP)
4247 NODE_NAME_CASE(COS_HW)
4248 NODE_NAME_CASE(SIN_HW)
4249 NODE_NAME_CASE(FMAX_LEGACY)
4250 NODE_NAME_CASE(FMIN_LEGACY)
4251 NODE_NAME_CASE(FMAX3)
4252 NODE_NAME_CASE(SMAX3)
4253 NODE_NAME_CASE(UMAX3)
4254 NODE_NAME_CASE(FMIN3)
4255 NODE_NAME_CASE(SMIN3)
4256 NODE_NAME_CASE(UMIN3)
4257 NODE_NAME_CASE(FMED3)
4258 NODE_NAME_CASE(SMED3)
4259 NODE_NAME_CASE(UMED3)
4260 NODE_NAME_CASE(FDOT2)
4261 NODE_NAME_CASE(URECIP)
4262 NODE_NAME_CASE(DIV_SCALE)
4263 NODE_NAME_CASE(DIV_FMAS)
4264 NODE_NAME_CASE(DIV_FIXUP)
4265 NODE_NAME_CASE(FMAD_FTZ)
4268 NODE_NAME_CASE(RCP_LEGACY)
4269 NODE_NAME_CASE(RCP_IFLAG)
4270 NODE_NAME_CASE(FMUL_LEGACY)
4271 NODE_NAME_CASE(RSQ_CLAMP)
4272 NODE_NAME_CASE(LDEXP)
4273 NODE_NAME_CASE(FP_CLASS)
4274 NODE_NAME_CASE(DOT4)
4275 NODE_NAME_CASE(CARRY)
4276 NODE_NAME_CASE(BORROW)
4277 NODE_NAME_CASE(BFE_U32)
4278 NODE_NAME_CASE(BFE_I32)
4281 NODE_NAME_CASE(FFBH_U32)
4282 NODE_NAME_CASE(FFBH_I32)
4283 NODE_NAME_CASE(FFBL_B32)
4284 NODE_NAME_CASE(MUL_U24)
4285 NODE_NAME_CASE(MUL_I24)
4286 NODE_NAME_CASE(MULHI_U24)
4287 NODE_NAME_CASE(MULHI_I24)
4288 NODE_NAME_CASE(MUL_LOHI_U24)
4289 NODE_NAME_CASE(MUL_LOHI_I24)
4290 NODE_NAME_CASE(MAD_U24)
4291 NODE_NAME_CASE(MAD_I24)
4292 NODE_NAME_CASE(MAD_I64_I32)
4293 NODE_NAME_CASE(MAD_U64_U32)
4294 NODE_NAME_CASE(PERM)
4295 NODE_NAME_CASE(TEXTURE_FETCH)
4296 NODE_NAME_CASE(R600_EXPORT)
4297 NODE_NAME_CASE(CONST_ADDRESS)
4298 NODE_NAME_CASE(REGISTER_LOAD)
4299 NODE_NAME_CASE(REGISTER_STORE)
4300 NODE_NAME_CASE(SAMPLE)
4301 NODE_NAME_CASE(SAMPLEB)
4302 NODE_NAME_CASE(SAMPLED)
4303 NODE_NAME_CASE(SAMPLEL)
4304 NODE_NAME_CASE(CVT_F32_UBYTE0)
4305 NODE_NAME_CASE(CVT_F32_UBYTE1)
4306 NODE_NAME_CASE(CVT_F32_UBYTE2)
4307 NODE_NAME_CASE(CVT_F32_UBYTE3)
4308 NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
4309 NODE_NAME_CASE(CVT_PKNORM_I16_F32)
4310 NODE_NAME_CASE(CVT_PKNORM_U16_F32)
4311 NODE_NAME_CASE(CVT_PK_I16_I32)
4312 NODE_NAME_CASE(CVT_PK_U16_U32)
4313 NODE_NAME_CASE(FP_TO_FP16)
4314 NODE_NAME_CASE(FP16_ZEXT)
4315 NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4316 NODE_NAME_CASE(CONST_DATA_PTR)
4317 NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4319 NODE_NAME_CASE(DUMMY_CHAIN)
4320 case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4321 NODE_NAME_CASE(LOAD_D16_HI)
4322 NODE_NAME_CASE(LOAD_D16_LO)
4323 NODE_NAME_CASE(LOAD_D16_HI_I8)
4324 NODE_NAME_CASE(LOAD_D16_HI_U8)
4325 NODE_NAME_CASE(LOAD_D16_LO_I8)
4326 NODE_NAME_CASE(LOAD_D16_LO_U8)
4327 NODE_NAME_CASE(STORE_MSKOR)
4328 NODE_NAME_CASE(LOAD_CONSTANT)
4329 NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4330 NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4331 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4332 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4333 NODE_NAME_CASE(DS_ORDERED_COUNT)
4334 NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4335 NODE_NAME_CASE(ATOMIC_INC)
4336 NODE_NAME_CASE(ATOMIC_DEC)
4337 NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4338 NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4339 NODE_NAME_CASE(ATOMIC_LOAD_CSUB)
4340 NODE_NAME_CASE(BUFFER_LOAD)
4341 NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
4342 NODE_NAME_CASE(BUFFER_LOAD_USHORT)
4343 NODE_NAME_CASE(BUFFER_LOAD_BYTE)
4344 NODE_NAME_CASE(BUFFER_LOAD_SHORT)
4345 NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4346 NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4347 NODE_NAME_CASE(SBUFFER_LOAD)
4348 NODE_NAME_CASE(BUFFER_STORE)
4349 NODE_NAME_CASE(BUFFER_STORE_BYTE)
4350 NODE_NAME_CASE(BUFFER_STORE_SHORT)
4351 NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4352 NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4353 NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4354 NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4355 NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4356 NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4357 NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4358 NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4359 NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4360 NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4361 NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4362 NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4363 NODE_NAME_CASE(BUFFER_ATOMIC_INC)
4364 NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
4365 NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4366 NODE_NAME_CASE(BUFFER_ATOMIC_CSUB)
4367 NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
4368 NODE_NAME_CASE(BUFFER_ATOMIC_PK_FADD)
4369 NODE_NAME_CASE(ATOMIC_PK_FADD)
4371 case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
4372 }
4373 return nullptr;
4374 }
4376 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4377 SelectionDAG &DAG, int Enabled,
4378 int &RefinementSteps,
4379 bool &UseOneConstNR,
4380 bool Reciprocal) const {
4381 EVT VT = Operand.getValueType();
4383 if (VT == MVT::f32) {
4384 RefinementSteps = 0;
4385 return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
4388 // TODO: There is also an f64 rsq instruction, but the documentation is less
4389 // clear on its precision.
4391 return SDValue();
4394 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4395 SelectionDAG &DAG, int Enabled,
4396 int &RefinementSteps) const {
4397 EVT VT = Operand.getValueType();
4399 if (VT == MVT::f32) {
4400 // Reciprocal, < 1 ulp error.
4402 // This reciprocal approximation converges to < 0.5 ulp error with one
4403 // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
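// One Newton-Raphson step for y ~= 1/x is y' = y * (2 - x * y), i.e. the two
// FMAs e = fma(-x, y, 1.0); y' = fma(y, e, y).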
4405 RefinementSteps = 0;
4406 return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
4409 // TODO: There is also an f64 rcp instruction, but the documentation is less
4410 // clear on its precision.
4412 return SDValue();
4415 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
4416 const SDValue Op, KnownBits &Known,
4417 const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
4419 Known.resetAll(); // Don't know anything.
4421 unsigned Opc = Op.getOpcode();
4423 switch (Opc) {
4424 default:
4425 break;
4426 case AMDGPUISD::CARRY:
4427 case AMDGPUISD::BORROW: {
4428 Known.Zero = APInt::getHighBitsSet(32, 31);
4432 case AMDGPUISD::BFE_I32:
4433 case AMDGPUISD::BFE_U32: {
4434 ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4438 uint32_t Width = CWidth->getZExtValue() & 0x1f;
4440 if (Opc == AMDGPUISD::BFE_U32)
4441 Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
4445 case AMDGPUISD::FP_TO_FP16:
4446 case AMDGPUISD::FP16_ZEXT: {
4447 unsigned BitWidth = Known.getBitWidth();
4449 // High bits are zero.
4450 Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
4453 case AMDGPUISD::MUL_U24:
4454 case AMDGPUISD::MUL_I24: {
4455 KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4456 KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4457 unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
4458 RHSKnown.countMinTrailingZeros();
4459 Known.Zero.setLowBits(std::min(TrailZ, 32u));
4460 // Skip extra check if all bits are known zeros.
4464 // Truncate to 24 bits.
4465 LHSKnown = LHSKnown.trunc(24);
4466 RHSKnown = RHSKnown.trunc(24);
4468 if (Opc == AMDGPUISD::MUL_I24) {
4469 unsigned LHSValBits = 24 - LHSKnown.countMinSignBits();
4470 unsigned RHSValBits = 24 - RHSKnown.countMinSignBits();
4471 unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
4472 if (MaxValBits >= 32)
4474 bool LHSNegative = LHSKnown.isNegative();
4475 bool LHSNonNegative = LHSKnown.isNonNegative();
4476 bool LHSPositive = LHSKnown.isStrictlyPositive();
4477 bool RHSNegative = RHSKnown.isNegative();
4478 bool RHSNonNegative = RHSKnown.isNonNegative();
4479 bool RHSPositive = RHSKnown.isStrictlyPositive();
4481 if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
4482 Known.Zero.setHighBits(32 - MaxValBits);
4483 else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
4484 Known.One.setHighBits(32 - MaxValBits);
4486 unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros();
4487 unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros();
4488 unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
4489 if (MaxValBits >= 32)
4491 Known.Zero.setHighBits(32 - MaxValBits);
4495 case AMDGPUISD::PERM: {
4496 ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4497 if (!CMask)
4498 return;
4500 KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4501 KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4502 unsigned Sel = CMask->getZExtValue();
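// Each selector byte picks one byte of the result (see the checks below):
// values 0-3 take a byte of the second operand, 4-6 a byte of the first,
// 0x0c forces 0x00, and anything above 0x0c forces 0xff.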
4504 for (unsigned I = 0; I < 32; I += 8) {
4505 unsigned SelBits = Sel & 0xff;
4506 if (SelBits < 4) {
4507 SelBits *= 8;
4508 Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4509 Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4510 } else if (SelBits < 7) {
4511 SelBits = (SelBits & 3) * 8;
4512 Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4513 Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4514 } else if (SelBits == 0x0c) {
4515 Known.Zero |= 0xFFull << I;
4516 } else if (SelBits > 0x0c) {
4517 Known.One |= 0xFFull << I;
4518 }
4519 Sel >>= 8;
4520 }
4521 break;
4523 case AMDGPUISD::BUFFER_LOAD_UBYTE: {
4524 Known.Zero.setHighBits(24);
4527 case AMDGPUISD::BUFFER_LOAD_USHORT: {
4528 Known.Zero.setHighBits(16);
4531 case AMDGPUISD::LDS: {
4532 auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
4533 Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout());
4535 Known.Zero.setHighBits(16);
4536 Known.Zero.setLowBits(Log2(Alignment));
4539 case ISD::INTRINSIC_WO_CHAIN: {
4540 unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4541 switch (IID) {
4542 case Intrinsic::amdgcn_mbcnt_lo:
4543 case Intrinsic::amdgcn_mbcnt_hi: {
4544 const GCNSubtarget &ST =
4545 DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
4546 // These return at most the wavefront size - 1.
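// For example, on a wave64 subtarget the result is at most 63, so bits
// [6, 31] are known zero.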
4547 unsigned Size = Op.getValueType().getSizeInBits();
4548 Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2());
4558 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
4559 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4560 unsigned Depth) const {
4561 switch (Op.getOpcode()) {
4562 case AMDGPUISD::BFE_I32: {
4563 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4564 if (!Width)
4565 return 1;
4567 unsigned SignBits = 32 - Width->getZExtValue() + 1;
4568 if (!isNullConstant(Op.getOperand(1)))
4569 return SignBits;
4571 // TODO: Could probably figure something out with non-0 offsets.
4572 unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
4573 return std::max(SignBits, Op0SignBits);
4576 case AMDGPUISD::BFE_U32: {
4577 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4578 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
4581 case AMDGPUISD::CARRY:
4582 case AMDGPUISD::BORROW:
4583 return 31;
4584 case AMDGPUISD::BUFFER_LOAD_BYTE:
4585 return 25;
4586 case AMDGPUISD::BUFFER_LOAD_SHORT:
4587 return 17;
4588 case AMDGPUISD::BUFFER_LOAD_UBYTE:
4589 return 24;
4590 case AMDGPUISD::BUFFER_LOAD_USHORT:
4591 return 16;
4592 case AMDGPUISD::FP_TO_FP16:
4593 case AMDGPUISD::FP16_ZEXT:
4594 return 16;
4600 unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
4601 GISelKnownBits &Analysis, Register R,
4602 const APInt &DemandedElts, const MachineRegisterInfo &MRI,
4603 unsigned Depth) const {
4604 const MachineInstr *MI = MRI.getVRegDef(R);
4605 if (!MI)
4606 return 1;
4608 // TODO: Check range metadata on MMO.
4609 switch (MI->getOpcode()) {
4610 case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
4611 return 25;
4612 case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
4613 return 17;
4614 case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
4615 return 24;
4616 case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
4617 return 16;
4623 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
4624 const SelectionDAG &DAG,
4626 unsigned Depth) const {
4627 unsigned Opcode = Op.getOpcode();
4629 case AMDGPUISD::FMIN_LEGACY:
4630 case AMDGPUISD::FMAX_LEGACY: {
4631 if (SNaN)
4632 return true;
4634 // TODO: Can check no nans on one of the operands for each one, but which
4635 // one?
4636 return false;
4638 case AMDGPUISD::FMUL_LEGACY:
4639 case AMDGPUISD::CVT_PKRTZ_F16_F32: {
4640 if (SNaN)
4641 return true;
4642 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4643 DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4645 case AMDGPUISD::FMED3:
4646 case AMDGPUISD::FMIN3:
4647 case AMDGPUISD::FMAX3:
4648 case AMDGPUISD::FMAD_FTZ: {
4649 if (SNaN)
4650 return true;
4651 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
4652 DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4653 DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4655 case AMDGPUISD::CVT_F32_UBYTE0:
4656 case AMDGPUISD::CVT_F32_UBYTE1:
4657 case AMDGPUISD::CVT_F32_UBYTE2:
4658 case AMDGPUISD::CVT_F32_UBYTE3:
4659 return true;
4661 case AMDGPUISD::RCP:
4662 case AMDGPUISD::RSQ:
4663 case AMDGPUISD::RCP_LEGACY:
4664 case AMDGPUISD::RSQ_CLAMP: {
4665 if (SNaN)
4666 return true;
4668 // TODO: Need an is-known-positive check.
4669 return false;
4671 case AMDGPUISD::LDEXP:
4672 case AMDGPUISD::FRACT: {
4673 if (SNaN)
4674 return true;
4675 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
4677 case AMDGPUISD::DIV_SCALE:
4678 case AMDGPUISD::DIV_FMAS:
4679 case AMDGPUISD::DIV_FIXUP:
4680 // TODO: Refine on operands.
4681 return SNaN;
4682 case AMDGPUISD::SIN_HW:
4683 case AMDGPUISD::COS_HW: {
4684 // TODO: Need a check for infinity.
4685 return SNaN;
4687 case ISD::INTRINSIC_WO_CHAIN: {
4688 unsigned IntrinsicID
4689 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4690 // TODO: Handle more intrinsics
4691 switch (IntrinsicID) {
4692 case Intrinsic::amdgcn_cubeid:
4693 return true;
4695 case Intrinsic::amdgcn_frexp_mant: {
4696 if (SNaN)
4697 return true;
4698 return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
4700 case Intrinsic::amdgcn_cvt_pkrtz: {
4701 if (SNaN)
4702 return true;
4703 return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
4704 DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
4706 case Intrinsic::amdgcn_rcp:
4707 case Intrinsic::amdgcn_rsq:
4708 case Intrinsic::amdgcn_rcp_legacy:
4709 case Intrinsic::amdgcn_rsq_legacy:
4710 case Intrinsic::amdgcn_rsq_clamp: {
4711 if (SNaN)
4712 return true;
4714 // TODO: Need an is-known-positive check.
4715 return false;
4717 case Intrinsic::amdgcn_trig_preop:
4718 case Intrinsic::amdgcn_fdot2:
4719 // TODO: Refine on operand
4720 return SNaN;
4730 TargetLowering::AtomicExpansionKind
4731 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
4732 switch (RMW->getOperation()) {
4733 case AtomicRMWInst::Nand:
4734 case AtomicRMWInst::FAdd:
4735 case AtomicRMWInst::FSub:
4736 return AtomicExpansionKind::CmpXChg;
4737 default:
4738 return AtomicExpansionKind::None;