//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//
#define AMDGPU_LOG2E_F     1.44269504088896340735992468100189214f
#define AMDGPU_LN2_F       0.693147180559945309417232121458176568f
#define AMDGPU_LN10_F      2.30258509299404568401799145468436421f
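
// These constants back the FLOG / FLOG10 custom lowering below: the hardware
// provides log2 (FLOG2 is Legal), so ln(x) is computed as
// log2(x) * (1 / log2(e)) and log10(x) as log2(x) * (ln(2) / ln(10)),
// per the change-of-base identity.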
#include "AMDGPUISelLowering.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           const TargetRegisterClass *RC,
                           unsigned NumRegs) {
  ArrayRef<MCPhysReg> RegList = makeArrayRef(RC->begin(), NumRegs);
  unsigned RegResult = State.AllocateReg(RegList);
  if (RegResult == AMDGPU::NoRegister)
    return false;

  State.addLoc(CCValAssign::getReg(ValNo, ValVT, RegResult, LocVT, LocInfo));
  return true;
}
static bool allocateSGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32:
  case MVT::v4i16:
  case MVT::v4f16: {
    // Up to SGPR0-105 (53 aligned 64-bit tuples).
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::SGPR_64RegClass, 53);
  }
  default:
    return false;
  }
}
// Allocate up to VGPR31.
//
// TODO: Since there are no VGPR alignment requirements, would it be better to
// split into individual scalar registers?
static bool allocateVGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32:
  case MVT::v4i16:
  case MVT::v4f16: {
    // 31 pairs: v[0:1] through v[30:31].
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_64RegClass, 31);
  }
  case MVT::v4i32:
  case MVT::v4f32:
  case MVT::v2i64:
  case MVT::v2f64: {
    // 29 quads: v[0:3] through v[28:31].
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_128RegClass, 29);
  }
  case MVT::v8i32:
  case MVT::v8f32: {
    // 25 octets: v[0:7] through v[24:31].
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_256RegClass, 25);
  }
  case MVT::v16i32:
  case MVT::v16f32: {
    // 17 sixteen-register tuples: v[0:15] through v[16:31].
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_512RegClass, 17);
  }
  default:
    return false;
  }
}
#include "AMDGPUGenCallingConv.inc"
// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
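
// For example, a 32-bit type such as v2f16 maps to i32, while a 64-bit type
// such as v4f16 maps to v2i32 and a 128-bit type such as v4f32 maps to v4i32.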
unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  KnownBits Known = DAG.computeKnownBits(Op);
  return VT.getSizeInBits() - Known.countMinLeadingZeros();
}
unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must
  // be a sign bit.
  return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
}
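
// For example, an i32 value produced by sign-extending an i16 has at least 17
// sign bits, so numBitsSigned reports at most 15; callers compare these
// results against 24 to tell whether a value can be treated as a 24-bit
// quantity.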
AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v3f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v5f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::v32f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FLOG, MVT::f32, Custom);
  setOperationAction(ISD::FLOG10, MVT::f32, Custom);
  setOperationAction(ISD::FEXP, MVT::f32, Custom);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);
  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v3i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v3f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v5i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v5f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom);

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand.
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // The GPU does not have a divrem instruction for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // The GPU does not have [S|U]MUL_LOHI as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);

    // AMDGPU uses ADDC/SUBC/ADDE/SUBE.
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }
  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }
  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FCANONICALIZE, VT, Expand);
  }
  // This causes using an unrolled select operation rather than expansion with
  // bit operations. This is in general better, but the alternative using BFI
  // instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For now,
  // we don't have a way of knowing during instruction selection if a condition
  // will be uniform, so we always use vector compares. Assume we are using
  // vector compares until that is fixed.
  setHasMultipleConditionRegisters(true);

  setMinCmpXchgSizeInBits(32);
  setSupportsUnalignedAtomics(false);

  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we usually have to split the loads.
  // When we can more precisely specify load legality per address space, we
  // should be able to make FindBetterChain/MergeConsecutiveStores smarter so
  // that they can figure out what to do in 2 iterations without all N > 4
  // stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
  // about these during lowering.
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;
  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FABS);
  setTargetDAGCombine(ISD::AssertZext);
  setTargetDAGCombine(ISD::AssertSext);
}
//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//
LLVM_READNONE
static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FSIN:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FCANONICALIZE:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMED3:
    return true;
  default:
    return false;
  }
}
/// \returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
LLVM_READONLY
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}
// Most FP instructions support source modifiers, but this could be refined
// slightly.
LLVM_READONLY
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case ISD::CopyToReg:
  case ISD::SELECT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::INLINEASM:
  case ISD::INLINEASM_BR:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
  case AMDGPUISD::DIV_SCALE:

  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  default:
    return true;
  }
}
bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // it is truly free to use a source modifier in all cases. If there are
  // multiple users, and each use would otherwise require a separate VOP3
  // encoding, there will be a code size increase. Try to avoid increasing code
  // size unless we know it will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit number of uses to check?
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}
MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}
// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}
// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}
bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType ExtTy,
                                                 EVT NewVT) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
    return false;

  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  MemSDNode *MN = cast<MemSDNode>(N);
  unsigned AS = MN->getAddressSpace();
  // Do not shrink an aligned scalar load to sub-dword.
  // Scalar engine cannot do sub-dword loads.
  if (OldSize >= 32 && NewSize < 32 && MN->getAlignment() >= 4 &&
      (AS == AMDGPUAS::CONSTANT_ADDRESS ||
       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
       (isa<LoadSDNode>(N) &&
        AS == AMDGPUAS::GLOBAL_ADDRESS && MN->isInvariant())) &&
      AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
    return false;

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // matter.

  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
                                                   const SelectionDAG &DAG,
                                                   const MachineMemOperand &MMO) const {
  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
    return false;

  bool Fast = false;
  return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), CastTy,
                            MMO, &Fast) &&
         Fast;
}
// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}
bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  case ISD::LOAD: {
    const LoadSDNode *L = dyn_cast<LoadSDNode>(N);
    if (L->getMemOperand()->getAddrSpace() ==
        AMDGPUAS::CONSTANT_ADDRESS_32BIT)
      return true;
    return false;
  }
  }
}
//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//
bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16) ||
         (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
}
bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}
bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // super register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}
bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}
bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}
bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}
bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.

  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}
bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}
bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}
//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//
CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return CC_AMDGPU_Func;
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  default:
    report_fatal_error("Unsupported calling convention for call");
  }
}
CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    llvm_unreachable("kernels should not be handled here");
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return RetCC_SI_Shader;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}
/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original type sizes
/// from the LLVM IR Function and fixup the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments().
///
/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
/// the argument before it was split. From this, we deduce that the memory type
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
    CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const {
  const MachineFunction &MF = State.getMachineFunction();
  const Function &Fn = MF.getFunction();
  LLVMContext &Ctx = Fn.getParent()->getContext();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
  const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
  CallingConv::ID CC = Fn.getCallingConv();

  unsigned MaxAlign = 1;
  uint64_t ExplicitArgOffset = 0;
  const DataLayout &DL = Fn.getParent()->getDataLayout();

  unsigned InIndex = 0;

  for (const Argument &Arg : Fn.args()) {
    Type *BaseArgTy = Arg.getType();
    unsigned Align = DL.getABITypeAlignment(BaseArgTy);
    MaxAlign = std::max(Align, MaxAlign);
    unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;

    // We're basically throwing away everything passed into us and starting
    // over to get accurate in-memory offsets. The "PartOffset" is completely
    // useless to us as computed in Ins.
    //
    // We also need to figure out what type legalization is trying to do to get
    // the correct memory offsets.

    SmallVector<EVT, 16> ValueVTs;
    SmallVector<uint64_t, 16> Offsets;
    ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);

    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      uint64_t BasePartOffset = Offsets[Value];

      EVT ArgVT = ValueVTs[Value];
      EVT MemVT = ArgVT;
      MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
      unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);

      if (NumRegs == 1) {
        // This argument is not split, so the IR type is the memory type.
        if (ArgVT.isExtended()) {
          // We have an extended type, like i24, so we should just use the
          // register type.
          MemVT = RegisterVT;
        } else {
          MemVT = ArgVT;
        }
      } else if (ArgVT.isVector() && RegisterVT.isVector() &&
                 ArgVT.getScalarType() == RegisterVT.getScalarType()) {
        assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
        // We have a vector value which has been split into a vector with
        // the same scalar type, but fewer elements. This should handle
        // all the floating-point vector types.
        MemVT = RegisterVT;
      } else if (ArgVT.isVector() &&
                 ArgVT.getVectorNumElements() == NumRegs) {
        // This arg has been split so that each element is stored in a separate
        // register.
        MemVT = ArgVT.getScalarType();
      } else if (ArgVT.isExtended()) {
        // We have an extended type, like i65.
        MemVT = RegisterVT;
      } else {
        unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
        assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
        if (RegisterVT.isInteger()) {
          MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
        } else if (RegisterVT.isVector()) {
          assert(!RegisterVT.getScalarType().isFloatingPoint());
          unsigned NumElements = RegisterVT.getVectorNumElements();
          assert(MemoryBits % NumElements == 0);
          // This vector type has been split into another vector type with
          // a different element size.
          EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                           MemoryBits / NumElements);
          MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
        } else {
          llvm_unreachable("cannot deduce memory type.");
        }
      }

      // Convert one element vectors to scalar.
      if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
        MemVT = MemVT.getScalarType();

      // Round up vec3/vec5 arguments.
      if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
        assert(MemVT.getVectorNumElements() == 3 ||
               MemVT.getVectorNumElements() == 5);
        MemVT = MemVT.getPow2VectorType(State.getContext());
      }

      unsigned PartOffset = 0;
      for (unsigned i = 0; i != NumRegs; ++i) {
        State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
                                               BasePartOffset + PartOffset,
                                               MemVT.getSimpleVT(),
                                               CCValAssign::Full));
        PartOffset += MemVT.getStoreSize();
      }
    }
  }
}
SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  // "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}
//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//
/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}
SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
                                                  SelectionDAG &DAG,
                                                  MachineFrameInfo &MFI,
                                                  int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument corresponding to this call.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U) {
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }
      }
    }
  }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}
SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
                                                 SmallVectorImpl<SDValue> &InVals,
                                                 StringRef Reason) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, Reason + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}
SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
}
SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::FLOG:
    return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
  case ISD::FLOG10:
    return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
  case ISD::FEXP:
    return lowerFEXP(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ_CTTZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}
void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}
static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
      G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
    if (!MFI->isEntryFunction()) {
      const Function &Fn = DAG.getMachineFunction().getFunction();
      DiagnosticInfoUnsupported BadLDSDecl(
          Fn, "local memory global used by non-kernel function",
          SDLoc(Op).getDebugLoc());
      DAG.getContext()->diagnose(BadLDSDecl);
    }

    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (!hasDefinedInitializer(GV)) {
      unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
      return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
    }
  }

  const Function &Fn = DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space",
      SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}
SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  EVT VT = Op.getValueType();
  if (VT == MVT::v4i16 || VT == MVT::v4f16) {
    SDLoc SL(Op);
    SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
    SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));

    SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
    return DAG.getNode(ISD::BITCAST, SL, VT, BV);
  }

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}
/// Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOGT:
  case ISD::SETOGE: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}
// Split a vector type into two parts. The first part is a power of two vector.
// The second part is whatever is left over, and is a scalar if it would
// otherwise be a 1-vector.
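// For example, <3 x i32> splits into (<2 x i32>, i32) and <5 x i32> into
// (<4 x i32>, i32), while a power-of-two type like <4 x i32> splits evenly
// into two <2 x i32> halves.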
std::pair<EVT, EVT>
AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
  EVT LoVT, HiVT;
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
  LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
  HiVT = NumElts - LoNumElts == 1
             ? EltVT
             : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
  return std::make_pair(LoVT, HiVT);
}
// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
// scalar.
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
                                  const EVT &LoVT, const EVT &HiVT,
                                  SelectionDAG &DAG) const {
  assert(LoVT.getVectorNumElements() +
                 (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
             N.getValueType().getVectorNumElements() &&
         "More vector elements requested than available!");
  auto IdxTy = getVectorIdxTy(DAG.getDataLayout());
  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
                           DAG.getConstant(0, DL, IdxTy));
  SDValue Hi = DAG.getNode(
      HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
      HiVT, N, DAG.getConstant(LoVT.getVectorNumElements(), DL, IdxTy));
  return std::make_pair(Lo, Hi);
}
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();
  SDLoc SL(Op);

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorLoad(Load, DAG);

  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
  std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
  std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                  Load->getChain(), BasePtr, SrcValue, LoMemVT,
                                  BaseAlign, Load->getMemOperand()->getFlags());
  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
  SDValue HiLoad =
      DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
                     HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, HiAlign, Load->getMemOperand()->getFlags());

  auto IdxTy = getVectorIdxTy(DAG.getDataLayout());
  SDValue Join;
  if (LoVT == HiVT) {
    // This is the case where the vector size is a power of two, so it was
    // evenly split.
    Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
  } else {
    Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
                       DAG.getConstant(0, SL, IdxTy));
    Join = DAG.getNode(HiVT.isVector() ? ISD::INSERT_SUBVECTOR
                                       : ISD::INSERT_VECTOR_ELT,
                       SL, VT, Join, HiLoad,
                       DAG.getConstant(LoVT.getVectorNumElements(), SL, IdxTy));
  }

  SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                     LoLoad.getValue(1), HiLoad.getValue(1))};

  return DAG.getMergeValues(Ops, SL);
}
// Widen a vector load from vec3 to vec4.
SDValue AMDGPUTargetLowering::WidenVectorLoad(SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(VT.getVectorNumElements() == 3);
  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);
  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Load->getAlignment();

  EVT WideVT =
      EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
  EVT WideMemVT =
      EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
  SDValue WideLoad = DAG.getExtLoad(
      Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
      WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
  return DAG.getMergeValues(
      {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
                   DAG.getConstant(0, SL, getVectorIdxTy(DAG.getDataLayout()))),
       WideLoad.getValue(1)},
      SL);
}
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorStore(Store, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
  std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
  std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);

  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore =
      DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
                        Store->getMemOperand()->getFlags());
  SDValue HiStore =
      DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
                        HiMemVT, HiAlign, Store->getMemOperand()->getFlags());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}
// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit signed integer.
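// (An f32 significand holds 24 bits: 23 stored plus the implicit leading one,
// so integers of up to 24 bits round-trip through f32 exactly.)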
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
                                            bool Sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
  if (LHSSignBits < 9)
    return SDValue();

  unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
  if (RHSSignBits < 9)
    return SDValue();

  unsigned BitSize = VT.getSizeInBits();
  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = BitSize - SignBits;
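  // Requiring at least 9 sign bits on each 32-bit operand bounds DivBits at
  // 23 here (24 after the signed adjustment below), which is what the f32
  // path can represent exactly.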
1605 ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1606 ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1608 SDValue jq = DAG.getConstant(1, DL, IntVT);
1610 if (Sign) {
1611 // char|short jq = ia ^ ib;
1612 jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1614 // jq = jq >> (bitsize - 2)
1615 jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1616 DAG.getConstant(BitSize - 2, DL, VT));
1618 // jq = jq | 0x1
1619 jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1620 }
1622 // int ia = (int)LHS;
1623 SDValue ia = LHS;
1625 // int ib = (int)RHS;
1626 SDValue ib = RHS;
1628 // float fa = (float)ia;
1629 SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1631 // float fb = (float)ib;
1632 SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1634 SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1635 fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1638 fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1640 // float fqneg = -fq;
1641 SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1643 // float fr = mad(fqneg, fb, fa);
1644 unsigned OpCode = Subtarget->hasFP32Denormals() ?
1645 (unsigned)AMDGPUISD::FMAD_FTZ :
1646 (unsigned)ISD::FMAD;
1647 SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1649 // int iq = (int)fq;
1650 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1653 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1656 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1658 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1660 // int cv = fr >= fb;
1661 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1663 // jq = (cv ? jq : 0);
1664 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1667 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1669 // Rem needs compensation, it's easier to recompute it
1670 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1671 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1673 // Truncate to number of bits this divide really is.
1674 if (Sign) {
1675 SDValue InRegSize
1676 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1677 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1678 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1679 } else {
1680 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1681 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1682 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1683 }
1685 return DAG.getMergeValues({ Div, Rem }, DL);
1686 }
1688 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1689 SelectionDAG &DAG,
1690 SmallVectorImpl<SDValue> &Results) const {
1691 SDLoc DL(Op);
1692 EVT VT = Op.getValueType();
1694 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1696 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1698 SDValue One = DAG.getConstant(1, DL, HalfVT);
1699 SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1702 SDValue LHS = Op.getOperand(0);
1703 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1704 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);
1706 SDValue RHS = Op.getOperand(1);
1707 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1708 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);
1710 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1711 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1713 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1714 LHS_Lo, RHS_Lo);
1716 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1717 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1719 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1720 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1721 return;
1722 }
1724 if (isTypeLegal(MVT::i64)) {
1725 // Compute denominator reciprocal.
1726 unsigned FMAD = Subtarget->hasFP32Denormals() ?
1727 (unsigned)AMDGPUISD::FMAD_FTZ :
1728 (unsigned)ISD::FMAD;
1730 SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1731 SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1732 SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1733 DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1734 Cvt_Lo);
1735 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1736 SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1737 DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1738 SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1739 DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1740 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1741 SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1742 DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1743 Mul1);
1744 SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1745 SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1746 SDValue Rcp64 = DAG.getBitcast(VT,
1747 DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
1749 SDValue Zero64 = DAG.getConstant(0, DL, VT);
1750 SDValue One64 = DAG.getConstant(1, DL, VT);
1751 SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1752 SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1754 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1755 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1756 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1757 SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1758 Zero);
1759 SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
1760 One);
1762 SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
1763 Mulhi1_Lo, Zero1);
1764 SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
1765 Mulhi1_Hi, Add1_Lo.getValue(1));
1766 SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi);
1767 SDValue Add1 = DAG.getBitcast(VT,
1768 DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1770 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1771 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1772 SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1773 Zero);
1774 SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
1775 One);
1777 SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
1778 Mulhi2_Lo, Zero1);
1779 SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc,
1780 Mulhi2_Hi, Add1_Lo.getValue(1));
1781 SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC,
1782 Zero, Add2_Lo.getValue(1));
1783 SDValue Add2 = DAG.getBitcast(VT,
1784 DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
1785 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
1787 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
1789 SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
1790 SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
1791 SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
1792 Mul3_Lo, Zero1);
1793 SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
1794 Mul3_Hi, Sub1_Lo.getValue(1));
1795 SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
1796 SDValue Sub1 = DAG.getBitcast(VT,
1797 DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
1799 SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
1800 SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
1801 ISD::SETUGE);
1802 SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
1803 ISD::SETUGE);
1804 SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
1806 // TODO: Here and below portions of the code can be enclosed into if/endif.
1807 // Currently control flow is unconditional and we have 4 selects after
1808 // potential endif to substitute PHIs.
1811 SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
1812 RHS_Lo, Zero1);
1813 SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
1814 RHS_Hi, Sub1_Lo.getValue(1));
1815 SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1816 Zero, Sub2_Lo.getValue(1));
1817 SDValue Sub2 = DAG.getBitcast(VT,
1818 DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
1820 SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
1822 SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
1823 ISD::SETUGE);
1824 SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
1825 ISD::SETUGE);
1826 SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
1829 SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
1831 SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
1832 RHS_Lo, Zero1);
1833 SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
1834 RHS_Hi, Sub2_Lo.getValue(1));
1835 SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
1836 Zero, Sub3_Lo.getValue(1));
1837 SDValue Sub3 = DAG.getBitcast(VT,
1838 DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
1843 SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
1844 SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
1846 SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
1847 SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
1849 Results.push_back(Div);
1850 Results.push_back(Rem);
1851 return;
1852 }
1856 // Get Speculative values
1857 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1858 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1860 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
1861 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
1862 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1864 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
1865 SDValue DIV_Lo = Zero;
1867 const unsigned halfBitWidth = HalfVT.getSizeInBits();
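// The loop below is classic restoring long division, one quotient bit per
// iteration: REM shifts in the next dividend bit; whenever REM >= RHS the
// corresponding DIV_Lo bit is set via realBIT and RHS is subtracted from
// REM -- the same base-2 analogue of manual decimal long division.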
1869 for (unsigned i = 0; i < halfBitWidth; ++i) {
1870 const unsigned bitPos = halfBitWidth - i - 1;
1871 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1872 // Get value of high bit
1873 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1874 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
1875 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1878 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1880 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1882 SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1883 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
1885 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1888 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1889 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1890 }
1892 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1893 DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1894 Results.push_back(DIV);
1895 Results.push_back(REM);
1896 }
1898 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1899 SelectionDAG &DAG) const {
1900 SDLoc DL(Op);
1901 EVT VT = Op.getValueType();
1903 if (VT == MVT::i64) {
1904 SmallVector<SDValue, 2> Results;
1905 LowerUDIVREM64(Op, DAG, Results);
1906 return DAG.getMergeValues(Results, DL);
1907 }
1909 if (VT == MVT::i32) {
1910 if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1911 return Res;
1912 }
1914 SDValue Num = Op.getOperand(0);
1915 SDValue Den = Op.getOperand(1);
1917 // RCP = URECIP(Den) = 2^32 / Den + e
1918 // e is rounding error.
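// Roughly: the quotient estimate is mulhu(RCP, Num) ~= Num / Den. E below
// estimates the error in RCP, RCP_A_E / RCP_S_E bracket the true
// reciprocal, and the two remainder tests at the end correct the quotient
// by at most one in either direction.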
1919 SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
1921 // RCP_LO = mul(RCP, Den)
1922 SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
1924 // RCP_HI = mulhu (RCP, Den)
1925 SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
1927 // NEG_RCP_LO = -RCP_LO
1928 SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
1929 RCP_LO);
1931 // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
1932 SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1933 NEG_RCP_LO, RCP_LO,
1934 ISD::SETEQ);
1935 // Calculate the rounding error from the URECIP instruction
1936 // E = mulhu(ABS_RCP_LO, RCP)
1937 SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
1939 // RCP_A_E = RCP + E
1940 SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
1942 // RCP_S_E = RCP - E
1943 SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
1945 // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
1946 SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1947 RCP_A_E, RCP_S_E,
1948 ISD::SETEQ);
1949 // Quotient = mulhu(Tmp0, Num)
1950 SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
1952 // Num_S_Remainder = Quotient * Den
1953 SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
1955 // Remainder = Num - Num_S_Remainder
1956 SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
1958 // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
1959 SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
1960 DAG.getConstant(-1, DL, VT),
1961 DAG.getConstant(0, DL, VT),
1962 ISD::SETUGE);
1963 // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
1964 SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
1965 Num_S_Remainder,
1966 DAG.getConstant(-1, DL, VT),
1967 DAG.getConstant(0, DL, VT),
1968 ISD::SETUGE);
1969 // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
1970 SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
1971 Remainder_GE_Zero);
1973 // Calculate Division result:
1975 // Quotient_A_One = Quotient + 1
1976 SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
1977 DAG.getConstant(1, DL, VT));
1979 // Quotient_S_One = Quotient - 1
1980 SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
1981 DAG.getConstant(1, DL, VT));
1983 // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
1984 SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1985 Quotient, Quotient_A_One, ISD::SETEQ);
1987 // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
1988 Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1989 Quotient_S_One, Div, ISD::SETEQ);
1991 // Calculate Rem result:
1993 // Remainder_S_Den = Remainder - Den
1994 SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
1996 // Remainder_A_Den = Remainder + Den
1997 SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
1999 // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
2000 SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
2001 Remainder, Remainder_S_Den, ISD::SETEQ);
2003 // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
2004 Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
2005 Remainder_A_Den, Rem, ISD::SETEQ);
2006 SDValue Ops[2] = {
2007 Div,
2008 Rem
2009 };
2010 return DAG.getMergeValues(Ops, DL);
2011 }
2013 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
2014 SelectionDAG &DAG) const {
2015 SDLoc DL(Op);
2016 EVT VT = Op.getValueType();
2018 SDValue LHS = Op.getOperand(0);
2019 SDValue RHS = Op.getOperand(1);
2021 SDValue Zero = DAG.getConstant(0, DL, VT);
2022 SDValue NegOne = DAG.getConstant(-1, DL, VT);
2024 if (VT == MVT::i32) {
2025 if (SDValue Res = LowerDIVREM24(Op, DAG, true))
2026 return Res;
2027 }
2029 if (VT == MVT::i64 &&
2030 DAG.ComputeNumSignBits(LHS) > 32 &&
2031 DAG.ComputeNumSignBits(RHS) > 32) {
2032 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
2035 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
2036 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
2037 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
2038 LHS_Lo, RHS_Lo);
2039 SDValue Res[2] = {
2040 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
2041 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
2042 };
2043 return DAG.getMergeValues(Res, DL);
2044 }
2046 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
2047 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
2048 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
2049 SDValue RSign = LHSign; // Remainder sign is the same as LHS
2051 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
2052 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
2054 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
2055 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
2057 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
2058 SDValue Rem = Div.getValue(1);
2060 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2061 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2063 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2064 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2066 SDValue Res[2] = {
2067 Div,
2068 Rem
2069 };
2070 return DAG.getMergeValues(Res, DL);
2071 }
2073 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
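// E.g. frem(5.5, 2.0): fdiv gives 2.75, ftrunc gives 2.0, and
// 5.5 - 2.0 * 2.0 = 1.5. Truncation (not floor) means the result takes the
// sign of x, so frem(-5.5, 2.0) = -1.5, matching fmod-style semantics.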
2074 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2075 SDLoc SL(Op);
2076 EVT VT = Op.getValueType();
2077 SDValue X = Op.getOperand(0);
2078 SDValue Y = Op.getOperand(1);
2080 // TODO: Should this propagate fast-math-flags?
2082 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
2083 SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
2084 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
2086 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
2087 }
2089 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2090 SDLoc SL(Op);
2091 SDValue Src = Op.getOperand(0);
2093 // result = trunc(src)
2094 // if (src > 0.0 && src != result)
2095 // result += 1.0
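// E.g. ceil(2.3): trunc = 2.0, src > 0 and src != trunc, so 1.0 is added,
// giving 3.0. For ceil(-2.3): trunc = -2.0 and src < 0, so the AND below is
// false and -2.0 is returned unchanged.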
2097 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2099 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2100 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2102 EVT SetCCVT =
2103 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2105 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2106 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2107 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2109 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2110 // TODO: Should this propagate fast-math-flags?
2111 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2112 }
2114 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2115 SelectionDAG &DAG) {
2116 const unsigned FractBits = 52;
2117 const unsigned ExpBits = 11;
2119 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2120 Hi,
2121 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2122 DAG.getConstant(ExpBits, SL, MVT::i32));
2123 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2124 DAG.getConstant(1023, SL, MVT::i32));
2126 return Exp;
2127 }
2129 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2130 SDLoc SL(Op);
2131 SDValue Src = Op.getOperand(0);
2133 assert(Op.getValueType() == MVT::f64);
2135 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2136 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2138 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2140 // Extract the upper half, since this is where we will find the sign and
2141 // exponent bits.
2142 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
2144 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2146 const unsigned FractBits = 52;
2148 // Extract the sign bit.
2149 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2150 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2152 // Extend back to 64-bits.
2153 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2154 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2156 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2157 const SDValue FractMask
2158 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2160 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2161 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2162 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2164 EVT SetCCVT =
2165 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2167 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2169 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2170 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2172 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2173 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2175 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2176 }
2178 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2179 SDLoc SL(Op);
2180 SDValue Src = Op.getOperand(0);
2182 assert(Op.getValueType() == MVT::f64);
2184 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2185 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2186 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
2188 // TODO: Should this propagate fast-math-flags?
2190 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2191 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
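// E.g. src = 2.3: CopySign = +2^52, and at magnitude 2^52 the f64 mantissa
// has no fraction bits, so Tmp1 = 2.3 + 2^52 rounds to the nearest integer
// and Tmp2 = Tmp1 - 2^52 = 2.0. Values with |src| already near or above
// 2^52 are integral and are passed through by the select below.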
2193 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2195 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2196 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2198 EVT SetCCVT =
2199 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2200 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2202 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2203 }
2205 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2206 // FNEARBYINT and FRINT are the same, except in their handling of FP
2207 // exceptions. Those aren't really meaningful for us, and OpenCL only has
2208 // rint, so just treat them as equivalent.
2209 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2210 }
2212 // XXX - May require not supporting f32 denormals?
2214 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2215 // compare and vselect end up producing worse code than scalarizing the whole
2216 // operation.
2217 SDValue AMDGPUTargetLowering::LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const {
2218 SDLoc SL(Op);
2219 SDValue X = Op.getOperand(0);
2220 EVT VT = Op.getValueType();
2222 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
2224 // TODO: Should this propagate fast-math-flags?
2226 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2228 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2230 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2231 const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2232 const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
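// E.g. x = 2.5: T = 2.0 and |x - T| = 0.5, so SignOne = +1.0 is selected
// and the result is 3.0; x = -2.5 yields SignOne = -1.0 and rounds to -3.0.
// Halfway cases therefore round away from zero, as round() requires.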
2234 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2236 EVT SetCCVT =
2237 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2239 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2241 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2243 return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2244 }
2246 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
2247 SDLoc SL(Op);
2248 SDValue X = Op.getOperand(0);
2250 SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
2252 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2253 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2254 const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
2255 const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
2256 EVT SetCCVT =
2257 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2259 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
2261 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
2263 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2265 const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
2266 MVT::i64);
2268 SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
2269 SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
2270 DAG.getConstant(INT64_C(0x0008000000000000), SL,
2271 MVT::i64),
2272 Exp);
2274 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
2275 SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
2276 DAG.getConstant(0, SL, MVT::i64), Tmp0,
2277 ISD::SETNE);
2279 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
2280 D, DAG.getConstant(0, SL, MVT::i64));
2281 SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
2283 K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
2284 K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
2286 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2287 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2288 SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
2290 SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
2291 ExpEqNegOne,
2292 DAG.getConstantFP(1.0, SL, MVT::f64),
2293 DAG.getConstantFP(0.0, SL, MVT::f64));
2295 SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
2297 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
2298 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
2300 return K;
2301 }
2303 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2304 EVT VT = Op.getValueType();
2306 if (VT == MVT::f32 || VT == MVT::f16)
2307 return LowerFROUND32_16(Op, DAG);
2309 if (VT == MVT::f64)
2310 return LowerFROUND64(Op, DAG);
2312 llvm_unreachable("unhandled type");
2313 }
2315 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2316 SDLoc SL(Op);
2317 SDValue Src = Op.getOperand(0);
2319 // result = trunc(src);
2320 // if (src < 0.0 && src != result)
2321 // result += -1.0
2323 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2325 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2326 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2328 EVT SetCCVT =
2329 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2331 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2332 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2333 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2335 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2336 // TODO: Should this propagate fast-math-flags?
2337 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2338 }
2340 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2341 double Log2BaseInverted) const {
2342 EVT VT = Op.getValueType();
2343 SDLoc SL(Op);
2345 SDValue Operand = Op.getOperand(0);
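// Uses the identity log_b(x) = log2(x) * (1 / log2(b)); e.g. for the
// natural log the caller passes Log2BaseInverted = ln(2) ~= 0.693147, so
// ln(8) = log2(8) * 0.693147 ~= 2.079.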
2346 SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2347 SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2349 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2350 }
2352 // Return M_LOG2E of appropriate type
2353 static SDValue getLog2EVal(SelectionDAG &DAG, const SDLoc &SL, EVT VT) {
2354 switch (VT.getScalarType().getSimpleVT().SimpleTy) {
2355 case MVT::f32:
2356 return DAG.getConstantFP(1.44269504088896340735992468100189214f, SL, VT);
2357 case MVT::f16:
2358 return DAG.getConstantFP(
2359 APFloat(APFloat::IEEEhalf(), "1.44269504088896340735992468100189214"),
2360 SL, VT);
2361 case MVT::f64:
2362 return DAG.getConstantFP(
2363 APFloat(APFloat::IEEEdouble(), "0x1.71547652b82fep+0"), SL, VT);
2364 default:
2365 llvm_unreachable("unsupported fp type");
2366 }
2367 }
2369 // exp2(M_LOG2E_F * f);
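// E.g. exp(1.0) = exp2(1.0 * log2(e)) = exp2(1.442695...) ~= 2.718282, so a
// single multiply feeds the hardware exp2.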
2370 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2371 EVT VT = Op.getValueType();
2372 SDLoc SL(Op);
2373 SDValue Src = Op.getOperand(0);
2375 const SDValue K = getLog2EVal(DAG, SL, VT);
2376 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
2377 return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
2378 }
2380 static bool isCtlzOpc(unsigned Opc) {
2381 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2382 }
2384 static bool isCttzOpc(unsigned Opc) {
2385 return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2386 }
2388 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2389 SDLoc SL(Op);
2390 SDValue Src = Op.getOperand(0);
2391 bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
2392 Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
2394 unsigned ISDOpc, NewOpc;
2395 if (isCtlzOpc(Op.getOpcode())) {
2396 ISDOpc = ISD::CTLZ_ZERO_UNDEF;
2397 NewOpc = AMDGPUISD::FFBH_U32;
2398 } else if (isCttzOpc(Op.getOpcode())) {
2399 ISDOpc = ISD::CTTZ_ZERO_UNDEF;
2400 NewOpc = AMDGPUISD::FFBL_B32;
2402 llvm_unreachable("Unexpected OPCode!!!");
2405 if (ZeroUndef && Src.getValueType() == MVT::i32)
2406 return DAG.getNode(NewOpc, SL, MVT::i32, Src);
2408 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2410 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2411 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
2413 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
2414 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
2416 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2417 *DAG.getContext(), MVT::i32);
2419 SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
2420 SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);
2422 SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
2423 SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);
2425 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
2426 SDValue Add, NewOpr;
2427 if (isCtlzOpc(Op.getOpcode())) {
2428 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
2429 // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2430 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
2431 } else {
2432 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
2433 // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
2434 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
2435 }
2437 if (!ZeroUndef) {
2438 // Test if the full 64-bit input is zero.
2440 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
2441 // which we probably don't want.
2442 SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
2443 SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
2444 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);
2446 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
2447 // with the same cycles, otherwise it is slower.
2448 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
2449 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
2451 const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);
2453 // The instruction returns -1 for 0 input, but the defined intrinsic
2454 // behavior is to return the number of bits.
2455 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
2456 SrcIsZero, Bits64, NewOpr);
2457 }
2459 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2460 }
2462 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2463 bool Signed) const {
2464 // Unsigned
2465 // cul2f(ulong u)
2466 //{
2467 // uint lz = clz(u);
2468 // uint e = (u != 0) ? 127U + 63U - lz : 0;
2469 // u = (u << lz) & 0x7fffffffffffffffUL;
2470 // ulong t = u & 0xffffffffffUL;
2471 // uint v = (e << 23) | (uint)(u >> 40);
2472 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
2473 // return as_float(v + r);
2474 //}
2475 // Signed
2476 // cl2f(long l)
2477 //{
2478 // long s = l >> 63;
2479 // float r = cul2f((l + s) ^ s);
2480 // return s ? -r : r;
2481 //}
2483 SDLoc SL(Op);
2484 SDValue Src = Op.getOperand(0);
2485 SDValue L = Src;
2487 SDValue S;
2488 if (Signed) {
2489 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
2490 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
2492 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
2493 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
2494 }
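// Tracing the cul2f scheme above with u = 1: lz = 63, e = 127 + 63 - 63 =
// 127, and u << lz drops the leading one, leaving zero mantissa bits; v is
// then (127 << 23) | 0 = 0x3f800000, the bit pattern of 1.0f, and the
// rounding term r is 0.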
2496 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2497 *DAG.getContext(), MVT::f32);
2500 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
2501 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
2502 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
2503 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
2505 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
2506 SDValue E = DAG.getSelect(SL, MVT::i32,
2507 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
2508 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
2509 ZeroI32);
2511 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
2512 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
2513 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
2515 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
2516 DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
2518 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
2519 U, DAG.getConstant(40, SL, MVT::i64));
2521 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
2522 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
2523 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl));
2525 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
2526 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
2527 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
2529 SDValue One = DAG.getConstant(1, SL, MVT::i32);
2531 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
2533 SDValue R = DAG.getSelect(SL, MVT::i32,
2534 RCmp,
2535 One,
2536 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
2537 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
2538 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
2540 if (!Signed)
2541 return R;
2543 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
2544 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
2545 }
2547 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2548 bool Signed) const {
2549 SDLoc SL(Op);
2550 SDValue Src = Op.getOperand(0);
2552 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2554 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2555 DAG.getConstant(0, SL, MVT::i32));
2556 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2557 DAG.getConstant(1, SL, MVT::i32));
2559 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2560 SL, MVT::f64, Hi);
2562 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2564 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2565 DAG.getConstant(32, SL, MVT::i32));
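// I.e. (double)x = ldexp((double)hi_32(x), 32) + (double)lo_32(x); e.g.
// x = 0x100000005 becomes 1.0 * 2^32 + 5.0 = 4294967301.0, exact because
// the f64 mantissa has 52 bits.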
2566 // TODO: Should this propagate fast-math-flags?
2567 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2568 }
2570 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2571 SelectionDAG &DAG) const {
2572 assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2573 "operation should be legal");
2575 // TODO: Factor out code common with LowerSINT_TO_FP.
2577 EVT DestVT = Op.getValueType();
2578 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2579 SDLoc DL(Op);
2580 SDValue Src = Op.getOperand(0);
2582 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2583 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2584 SDValue FPRound =
2585 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2587 return FPRound;
2588 }
2590 if (DestVT == MVT::f32)
2591 return LowerINT_TO_FP32(Op, DAG, false);
2593 assert(DestVT == MVT::f64);
2594 return LowerINT_TO_FP64(Op, DAG, false);
2595 }
2597 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2598 SelectionDAG &DAG) const {
2599 assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2600 "operation should be legal");
2602 // TODO: Factor out code common with LowerUINT_TO_FP.
2604 EVT DestVT = Op.getValueType();
2605 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2606 SDLoc DL(Op);
2607 SDValue Src = Op.getOperand(0);
2609 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2610 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2611 SDValue FPRound =
2612 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2614 return FPRound;
2615 }
2617 if (DestVT == MVT::f32)
2618 return LowerINT_TO_FP32(Op, DAG, true);
2620 assert(DestVT == MVT::f64);
2621 return LowerINT_TO_FP64(Op, DAG, true);
2622 }
2624 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
2625 bool Signed) const {
2627 SDLoc SL(Op);
2628 SDValue Src = Op.getOperand(0);
2630 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2632 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
2633 MVT::f64);
2634 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
2635 MVT::f64);
2636 // TODO: Should this propagate fast-math-flags?
2637 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
2639 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
2642 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
2644 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
2645 MVT::i32, FloorMul);
2646 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2648 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
2650 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
2651 }
2653 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2654 SDLoc DL(Op);
2655 SDValue N0 = Op.getOperand(0);
2657 // Convert to target node to get known bits
2658 if (N0.getValueType() == MVT::f32)
2659 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2661 if (getTargetMachine().Options.UnsafeFPMath) {
2662 // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2663 return Op;
2664 }
2666 assert(N0.getSimpleValueType() == MVT::f64);
2668 // f64 -> f16 conversion using round-to-nearest-even rounding mode.
2669 const unsigned ExpMask = 0x7ff;
2670 const unsigned ExpBiasf64 = 1023;
2671 const unsigned ExpBiasf16 = 15;
2672 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2673 SDValue One = DAG.getConstant(1, DL, MVT::i32);
2674 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2675 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2676 DAG.getConstant(32, DL, MVT::i64));
2677 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2678 U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2679 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2680 DAG.getConstant(20, DL, MVT::i64));
2681 E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2682 DAG.getConstant(ExpMask, DL, MVT::i32));
2683 // Subtract the fp64 exponent bias (1023) to get the real exponent and
2684 // add the f16 bias (15) to get the biased exponent for the f16 format.
2685 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2686 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
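// E.g. for 1.0 the f64 biased exponent field is 1023, so E becomes
// 1023 - 1023 + 15 = 15, the f16 biased exponent of 2^0.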
2688 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2689 DAG.getConstant(8, DL, MVT::i32));
2690 M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2691 DAG.getConstant(0xffe, DL, MVT::i32));
2693 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2694 DAG.getConstant(0x1ff, DL, MVT::i32));
2695 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2697 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2698 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2700 // (M != 0 ? 0x0200 : 0) | 0x7c00;
2701 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2702 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2703 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2705 // N = M | (E << 12);
2706 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2707 DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2708 DAG.getConstant(12, DL, MVT::i32)));
2710 // B = clamp(1-E, 0, 13);
2711 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2712 One, E);
2713 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2714 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2715 DAG.getConstant(13, DL, MVT::i32));
2717 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2718 DAG.getConstant(0x1000, DL, MVT::i32));
2720 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2721 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2722 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2723 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2725 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2726 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2727 DAG.getConstant(0x7, DL, MVT::i32));
2728 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2729 DAG.getConstant(2, DL, MVT::i32));
2730 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2731 One, Zero, ISD::SETEQ);
2732 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2733 One, Zero, ISD::SETGT);
2734 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2735 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2737 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2738 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2739 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2740 I, V, ISD::SETEQ);
2742 // Extract the sign bit.
2743 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2744 DAG.getConstant(16, DL, MVT::i32));
2745 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2746 DAG.getConstant(0x8000, DL, MVT::i32));
2748 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2749 return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2750 }
2752 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
2753 SelectionDAG &DAG) const {
2754 SDValue Src = Op.getOperand(0);
2756 // TODO: Factor out code common with LowerFP_TO_UINT.
2758 EVT SrcVT = Src.getValueType();
2759 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2760 SDLoc DL(Op);
2762 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2763 SDValue FpToInt32 =
2764 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2766 return FpToInt32;
2767 }
2769 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2770 return LowerFP64_TO_INT(Op, DAG, true);
2772 return SDValue();
2773 }
2775 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
2776 SelectionDAG &DAG) const {
2777 SDValue Src = Op.getOperand(0);
2779 // TODO: Factor out code common with LowerFP_TO_SINT.
2781 EVT SrcVT = Src.getValueType();
2782 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2783 SDLoc DL(Op);
2785 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2786 SDValue FpToInt32 =
2787 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2789 return FpToInt32;
2790 }
2792 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2793 return LowerFP64_TO_INT(Op, DAG, false);
2795 return SDValue();
2796 }
2798 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2799 SelectionDAG &DAG) const {
2800 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2801 MVT VT = Op.getSimpleValueType();
2802 MVT ScalarVT = VT.getScalarType();
2804 assert(VT.isVector());
2806 SDValue Src = Op.getOperand(0);
2807 SDLoc DL(Op);
2809 // TODO: Don't scalarize on Evergreen?
2810 unsigned NElts = VT.getVectorNumElements();
2811 SmallVector<SDValue, 8> Args;
2812 DAG.ExtractVectorElements(Src, Args, 0, NElts);
2814 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2815 for (unsigned I = 0; I < NElts; ++I)
2816 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2818 return DAG.getBuildVector(VT, DL, Args);
2819 }
2821 //===----------------------------------------------------------------------===//
2822 // Custom DAG optimizations
2823 //===----------------------------------------------------------------------===//
2825 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2826 return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
2827 }
2829 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2830 EVT VT = Op.getValueType();
2831 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2832 // as unsigned 24-bit values.
2833 AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
2834 }
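// E.g. 0x00ABCDEF has numBitsUnsigned == 24, so isU24 holds and MUL_U24 may
// be used; -123 (0xffffff85) has 25 sign bits, hence numBitsSigned == 7 and
// isI24 holds, so MUL_I24 preserves the signed product.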
2836 static SDValue simplifyI24(SDNode *Node24,
2837 TargetLowering::DAGCombinerInfo &DCI) {
2838 SelectionDAG &DAG = DCI.DAG;
2839 SDValue LHS = Node24->getOperand(0);
2840 SDValue RHS = Node24->getOperand(1);
2842 APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
2844 // First try to simplify using GetDemandedBits which allows the operands to
2845 // have other uses, but will only perform simplifications that involve
2846 // bypassing some nodes for this user.
2847 SDValue DemandedLHS = DAG.GetDemandedBits(LHS, Demanded);
2848 SDValue DemandedRHS = DAG.GetDemandedBits(RHS, Demanded);
2849 if (DemandedLHS || DemandedRHS)
2850 return DAG.getNode(Node24->getOpcode(), SDLoc(Node24), Node24->getVTList(),
2851 DemandedLHS ? DemandedLHS : LHS,
2852 DemandedRHS ? DemandedRHS : RHS);
2854 // Now try SimplifyDemandedBits which can simplify the nodes used by our
2855 // operands if this node is the only user.
2856 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2857 if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
2858 return SDValue(Node24, 0);
2859 if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
2860 return SDValue(Node24, 0);
2862 return SDValue();
2863 }
2865 template <typename IntTy>
2866 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2867 uint32_t Width, const SDLoc &DL) {
2868 if (Width + Offset < 32) {
2869 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2870 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2871 return DAG.getConstant(Result, DL, MVT::i32);
2872 }
2874 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2875 }
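// E.g. a signed BFE of Src0 = 0x00012300 with Offset = 8, Width = 16 shifts
// left by 32 - 8 - 16 = 8 to 0x01230000, then arithmetic-shifts right by
// 16, giving 0x123 = 291. When Width + Offset == 32 the plain Src0 >> Offset
// path applies: 0xff000000 with Offset = 24 folds to -1 in the signed case.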
2877 static bool hasVolatileUser(SDNode *Val) {
2878 for (SDNode *U : Val->uses()) {
2879 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2880 if (M->isVolatile())
2881 return true;
2882 }
2883 }
2885 return false;
2886 }
2888 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2889 // i32 vectors are the canonical memory type.
2890 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2891 return false;
2893 if (!VT.isByteSized())
2894 return false;
2896 unsigned Size = VT.getStoreSize();
2898 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2899 return false;
2901 if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2902 return false;
2904 return true;
2905 }
2907 // Find a load or store from corresponding pattern root.
2908 // Roots may be build_vector, bitconvert or their combinations.
2909 static MemSDNode* findMemSDNode(SDNode *N) {
2910 N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
2911 if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
2912 return MN;
2913 assert(isa<BuildVectorSDNode>(N));
2914 for (SDValue V : N->op_values())
2915 if (MemSDNode *MN =
2916 dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
2917 return MN;
2918 llvm_unreachable("cannot find MemSDNode in the pattern!");
2919 }
2921 bool AMDGPUTargetLowering::SelectFlatOffset(bool IsSigned,
2922 SelectionDAG &DAG,
2923 SDNode *N,
2924 SDValue Addr,
2925 SDValue &VAddr,
2926 SDValue &Offset,
2927 SDValue &SLC) const {
2928 const GCNSubtarget &ST =
2929 DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
2930 int64_t OffsetVal = 0;
2932 if (ST.hasFlatInstOffsets() &&
2933 (!ST.hasFlatSegmentOffsetBug() ||
2934 findMemSDNode(N)->getAddressSpace() != AMDGPUAS::FLAT_ADDRESS) &&
2935 DAG.isBaseWithConstantOffset(Addr)) {
2936 SDValue N0 = Addr.getOperand(0);
2937 SDValue N1 = Addr.getOperand(1);
2938 int64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();
2940 const SIInstrInfo *TII = ST.getInstrInfo();
2941 if (TII->isLegalFLATOffset(COffsetVal, findMemSDNode(N)->getAddressSpace(),
2942 IsSigned)) {
2943 Addr = N0;
2944 OffsetVal = COffsetVal;
2945 }
2946 }
2948 VAddr = Addr;
2949 Offset = DAG.getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
2950 SLC = DAG.getTargetConstant(0, SDLoc(), MVT::i1);
2952 return true;
2953 }
2955 // Replace load of an illegal type with a load of a bitcast to a friendlier
2956 // type.
2957 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2958 DAGCombinerInfo &DCI) const {
2959 if (!DCI.isBeforeLegalize())
2960 return SDValue();
2962 LoadSDNode *LN = cast<LoadSDNode>(N);
2963 if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2964 return SDValue();
2966 SDLoc SL(N);
2967 SelectionDAG &DAG = DCI.DAG;
2968 EVT VT = LN->getMemoryVT();
2970 unsigned Size = VT.getStoreSize();
2971 unsigned Align = LN->getAlignment();
2972 if (Align < Size && isTypeLegal(VT)) {
2973 bool IsFast;
2974 unsigned AS = LN->getAddressSpace();
2976 // Expand unaligned loads earlier than legalization. Due to visitation order
2977 // problems during legalization, the emitted instructions to pack and unpack
2978 // the bytes again are not eliminated in the case of an unaligned copy.
2979 if (!allowsMisalignedMemoryAccesses(
2980 VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
2981 if (VT.isVector())
2982 return scalarizeVectorLoad(LN, DAG);
2984 SDValue Ops[2];
2985 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2986 return DAG.getMergeValues(Ops, SDLoc(N));
2987 }
2989 if (!IsFast)
2990 return SDValue();
2991 }
2993 if (!shouldCombineMemoryType(VT))
2994 return SDValue();
2996 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2998 SDValue NewLoad
2999 = DAG.getLoad(NewVT, SL, LN->getChain(),
3000 LN->getBasePtr(), LN->getMemOperand());
3002 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
3003 DCI.CombineTo(N, BC, NewLoad.getValue(1));
3004 return SDValue(N, 0);
3005 }
3007 // Replace store of an illegal type with a store of a bitcast to a friendlier
3008 // type.
3009 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
3010 DAGCombinerInfo &DCI) const {
3011 if (!DCI.isBeforeLegalize())
3012 return SDValue();
3014 StoreSDNode *SN = cast<StoreSDNode>(N);
3015 if (SN->isVolatile() || !ISD::isNormalStore(SN))
3016 return SDValue();
3018 EVT VT = SN->getMemoryVT();
3019 unsigned Size = VT.getStoreSize();
3021 SDLoc SL(N);
3022 SelectionDAG &DAG = DCI.DAG;
3023 unsigned Align = SN->getAlignment();
3024 if (Align < Size && isTypeLegal(VT)) {
3025 bool IsFast;
3026 unsigned AS = SN->getAddressSpace();
3028 // Expand unaligned stores earlier than legalization. Due to visitation
3029 // order problems during legalization, the emitted instructions to pack and
3030 // unpack the bytes again are not eliminated in the case of an unaligned
3031 // copy.
3032 if (!allowsMisalignedMemoryAccesses(
3033 VT, AS, Align, SN->getMemOperand()->getFlags(), &IsFast)) {
3034 if (VT.isVector())
3035 return scalarizeVectorStore(SN, DAG);
3037 return expandUnalignedStore(SN, DAG);
3038 }
3040 if (!IsFast)
3041 return SDValue();
3042 }
3044 if (!shouldCombineMemoryType(VT))
3045 return SDValue();
3047 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3048 SDValue Val = SN->getValue();
3050 //DCI.AddToWorklist(Val.getNode());
3052 bool OtherUses = !Val.hasOneUse();
3053 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
3054 if (OtherUses) {
3055 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
3056 DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
3057 }
3059 return DAG.getStore(SN->getChain(), SL, CastVal,
3060 SN->getBasePtr(), SN->getMemOperand());
3061 }
3063 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
3064 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
3065 // issues.
3066 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
3067 DAGCombinerInfo &DCI) const {
3068 SelectionDAG &DAG = DCI.DAG;
3069 SDValue N0 = N->getOperand(0);
3071 // (vt2 (assertzext (truncate vt0:x), vt1)) ->
3072 // (vt2 (truncate (assertzext vt0:x, vt1)))
3073 if (N0.getOpcode() == ISD::TRUNCATE) {
3074 SDValue N1 = N->getOperand(1);
3075 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
3076 SDLoc SL(N);
3078 SDValue Src = N0.getOperand(0);
3079 EVT SrcVT = Src.getValueType();
3080 if (SrcVT.bitsGE(ExtVT)) {
3081 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
3082 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
3083 }
3084 }
3086 return SDValue();
3087 }
3088 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3089 /// binary operation \p Opc to it with the corresponding constant operands.
3090 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3091 DAGCombinerInfo &DCI, const SDLoc &SL,
3092 unsigned Opc, SDValue LHS,
3093 uint32_t ValLo, uint32_t ValHi) const {
3094 SelectionDAG &DAG = DCI.DAG;
3095 SDValue Lo, Hi;
3096 std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3098 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3099 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3101 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3102 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3104 // Re-visit the ands. It's possible we eliminated one of them and it could
3105 // simplify the vector.
3106 DCI.AddToWorklist(Lo.getNode());
3107 DCI.AddToWorklist(Hi.getNode());
3109 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3110 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3111 }
3113 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3114 DAGCombinerInfo &DCI) const {
3115 EVT VT = N->getValueType(0);
3117 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3118 if (!RHS)
3119 return SDValue();
3121 SDValue LHS = N->getOperand(0);
3122 unsigned RHSVal = RHS->getZExtValue();
3123 if (!RHSVal)
3124 return LHS;
3126 SDLoc SL(N);
3127 SelectionDAG &DAG = DCI.DAG;
3129 switch (LHS->getOpcode()) {
3130 default:
3131 break;
3132 case ISD::ZERO_EXTEND:
3133 case ISD::SIGN_EXTEND:
3134 case ISD::ANY_EXTEND: {
3135 SDValue X = LHS->getOperand(0);
3137 if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3138 isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3139 // Prefer build_vector as the canonical form if packed types are legal.
3140 // (shl ([asz]ext i16:x), 16 -> build_vector 0, x
3141 SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3142 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3143 return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3144 }
3146 // shl (ext x) => zext (shl x), if shift does not overflow int
3147 if (VT != MVT::i64)
3148 break;
3149 KnownBits Known = DAG.computeKnownBits(X);
3150 unsigned LZ = Known.countMinLeadingZeros();
3151 if (LZ < RHSVal)
3152 break;
3153 EVT XVT = X.getValueType();
3154 SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3155 return DAG.getZExtOrTrunc(Shl, SL, VT);
3156 }
3157 }
3159 if (VT != MVT::i64)
3160 return SDValue();
3162 // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
3164 // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3165 // common case, splitting this into a move and a 32-bit shift is faster and
3166 // the same code size.
3168 if (RHSVal < 32)
3169 return SDValue();
3170 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3172 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3173 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3175 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3177 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3178 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
3179 }
3181 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3182 DAGCombinerInfo &DCI) const {
3183 if (N->getValueType(0) != MVT::i64)
3184 return SDValue();
3186 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3187 if (!RHS)
3188 return SDValue();
3190 SelectionDAG &DAG = DCI.DAG;
3191 SDLoc SL(N);
3192 unsigned RHSVal = RHS->getZExtValue();
3194 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
3195 if (RHSVal == 32) {
3196 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3197 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3198 DAG.getConstant(31, SL, MVT::i32));
3200 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3201 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3202 }
3204 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3205 if (RHSVal == 63) {
3206 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3207 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3208 DAG.getConstant(31, SL, MVT::i32));
3209 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3210 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3211 }
3213 return SDValue();
3214 }
3216 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3217 DAGCombinerInfo &DCI) const {
3218 auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3219 if (!RHS)
3220 return SDValue();
3222 EVT VT = N->getValueType(0);
3223 SDValue LHS = N->getOperand(0);
3224 unsigned ShiftAmt = RHS->getZExtValue();
3225 SelectionDAG &DAG = DCI.DAG;
3226 SDLoc SL(N);
3228 // fold (srl (and x, c1 << c2), c2) -> (and (srl(x, c2), c1)
3229 // this improves the ability to match BFE patterns in isel.
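// E.g. (srl (and x, 0xff00), 8): the mask is a shifted mask whose
// trailing-zero count equals the shift amount, so it becomes
// (and (srl x, 8), 0xff), putting the mask at bit 0 where the BFE selection
// patterns can match it.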
3230 if (LHS.getOpcode() == ISD::AND) {
3231 if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3232 if (Mask->getAPIntValue().isShiftedMask() &&
3233 Mask->getAPIntValue().countTrailingZeros() == ShiftAmt) {
3234 return DAG.getNode(
3235 ISD::AND, SL, VT,
3236 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3237 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3238 }
3239 }
3240 }
3242 if (VT != MVT::i64)
3243 return SDValue();
3245 if (ShiftAmt < 32)
3246 return SDValue();
3248 // srl i64:x, C for C >= 32
3250 // build_pair (srl hi_32(x), C - 32), 0
3251 SDValue One = DAG.getConstant(1, SL, MVT::i32);
3252 SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3254 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, LHS);
3255 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecOp, One);
3257 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3258 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3260 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3262 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
3263 }
3265 SDValue AMDGPUTargetLowering::performTruncateCombine(
3266 SDNode *N, DAGCombinerInfo &DCI) const {
3267 SDLoc SL(N);
3268 SelectionDAG &DAG = DCI.DAG;
3269 EVT VT = N->getValueType(0);
3270 SDValue Src = N->getOperand(0);
3272 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3273 if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3274 SDValue Vec = Src.getOperand(0);
3275 if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3276 SDValue Elt0 = Vec.getOperand(0);
3277 EVT EltVT = Elt0.getValueType();
3278 if (VT.getSizeInBits() <= EltVT.getSizeInBits()) {
3279 if (EltVT.isFloatingPoint()) {
3280 Elt0 = DAG.getNode(ISD::BITCAST, SL,
3281 EltVT.changeTypeToInteger(), Elt0);
3282 }
3284 return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3285 }
3286 }
3287 }
3289 // Equivalent of above for accessing the high element of a vector as an
3290 // integer operation.
3291 // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y)
3292 if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3293 if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3294 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3295 SDValue BV = stripBitcast(Src.getOperand(0));
3296 if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3297 BV.getValueType().getVectorNumElements() == 2) {
3298 SDValue SrcElt = BV.getOperand(1);
3299 EVT SrcEltVT = SrcElt.getValueType();
3300 if (SrcEltVT.isFloatingPoint()) {
3301 SrcElt = DAG.getNode(ISD::BITCAST, SL,
3302 SrcEltVT.changeTypeToInteger(), SrcElt);
3305 return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3306 }
3307 }
3308 }
3309 }
3311 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3313 // i16 (trunc (srl i64:x, K)), K <= 16 ->
3314 // i16 (trunc (srl (i32 (trunc x), K)))
3315 if (VT.getScalarSizeInBits() < 32) {
3316 EVT SrcVT = Src.getValueType();
3317 if (SrcVT.getScalarSizeInBits() > 32 &&
3318 (Src.getOpcode() == ISD::SRL ||
3319 Src.getOpcode() == ISD::SRA ||
3320 Src.getOpcode() == ISD::SHL)) {
3321 SDValue Amt = Src.getOperand(1);
3322 KnownBits Known = DAG.computeKnownBits(Amt);
3323 unsigned Size = VT.getScalarSizeInBits();
3324 if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
3325 (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
3326 EVT MidVT = VT.isVector() ?
3327 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3328 VT.getVectorNumElements()) : MVT::i32;
3330 EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
                                  Src.getOperand(0));
      DCI.AddToWorklist(Trunc.getNode());
3335 if (Amt.getValueType() != NewShiftVT) {
3336 Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3337 DCI.AddToWorklist(Amt.getNode());
      SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
                                        Trunc, Amt);
      return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
    }
  }

  return SDValue();
}
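// Exposition-only check of the shrink above (the helper name is
// hypothetical): for K <= 16, the low 16 bits of a 64-bit right shift depend
// only on the low 32 bits of the source, so the shift can be done in 32 bits.
LLVM_ATTRIBUTE_UNUSED
static uint16_t modelShrunkSrl64To32(uint64_t X, unsigned K) {
  assert(K <= 16 && "bits K..K+15 must lie inside the low 32 bits");
  // i16 (trunc (srl i64:x, K)) == i16 (trunc (srl (i32 (trunc x)), K))
  return uint16_t(uint32_t(X) >> K);
}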
3350 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3351 // instructions. If we only match on the legalized i64 mul expansion,
3352 // SimplifyDemandedBits will be unable to remove them because there will be
3353 // multiple uses due to the separate mul + mulh[su].
3354 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
                        SDValue N0, SDValue N1, unsigned Size, bool Signed) {
  if (Size <= 32) {
    unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
    return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
  }
3361 // Because we want to eliminate extension instructions before the
3362 // operation, we need to create a single user here (i.e. not the separate
3363 // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
3365 unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
3367 SDValue Mul = DAG.getNode(MulOpc, SL,
3368 DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
  return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
                     Mul.getValue(0), Mul.getValue(1));
}
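// Scalar sketch of the 24-bit multiplies (illustrative; the helper name is
// hypothetical). The MUL*_24 nodes only consume the low 24 bits of each
// operand, so a full unsigned product fits in 48 bits and the lo/hi i32 pair
// never overflows.
LLVM_ATTRIBUTE_UNUSED
static uint64_t modelMulLoHiU24(uint32_t A, uint32_t B) {
  uint64_t Full = uint64_t(A & 0xffffff) * uint64_t(B & 0xffffff);
  // lo = uint32_t(Full), hi = uint32_t(Full >> 32); hi uses at most 16 bits.
  return Full;
}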
3374 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3375 DAGCombinerInfo &DCI) const {
3376 EVT VT = N->getValueType(0);
3378 unsigned Size = VT.getSizeInBits();
  if (VT.isVector() || Size > 64)
    return SDValue();
3382 // There are i16 integer mul/mad.
  if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
3389 SDValue N0 = N->getOperand(0);
3390 SDValue N1 = N->getOperand(1);
  // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
  // in the source into any_extends if the result of the mul is truncated. Since
  // we can assume the high bits are whatever we want, use the underlying value
  // to keep the unknown high bits from interfering.
3396 if (N0.getOpcode() == ISD::ANY_EXTEND)
3397 N0 = N0.getOperand(0);
3399 if (N1.getOpcode() == ISD::ANY_EXTEND)
3400 N1 = N1.getOperand(0);
  SDValue Mul;

  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3405 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3406 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3407 Mul = getMul24(DAG, DL, N0, N1, Size, false);
3408 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3409 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3410 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    Mul = getMul24(DAG, DL, N0, N1, Size, true);
  } else {
    return SDValue();
  }
3416 // We need to use sext even for MUL_U24, because MUL_U24 is used
3417 // for signed multiply of 8 and 16-bit types.
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}
3421 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
3422 DAGCombinerInfo &DCI) const {
3423 EVT VT = N->getValueType(0);
  if (!Subtarget->hasMulI24() || VT.isVector())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
3431 SDValue N0 = N->getOperand(0);
3432 SDValue N1 = N->getOperand(1);
  if (!isI24(N0, DAG) || !isI24(N1, DAG))
    return SDValue();
3437 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3438 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3440 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
3441 DCI.AddToWorklist(Mulhi.getNode());
  return DAG.getSExtOrTrunc(Mulhi, DL, VT);
}
3445 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
3446 DAGCombinerInfo &DCI) const {
3447 EVT VT = N->getValueType(0);
  if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
3455 SDValue N0 = N->getOperand(0);
3456 SDValue N1 = N->getOperand(1);
  if (!isU24(N0, DAG) || !isU24(N1, DAG))
    return SDValue();
3461 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3462 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3464 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
3465 DCI.AddToWorklist(Mulhi.getNode());
  return DAG.getZExtOrTrunc(Mulhi, DL, VT);
}
3469 SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
3470 SDNode *N, DAGCombinerInfo &DCI) const {
3471 SelectionDAG &DAG = DCI.DAG;
3473 // Simplify demanded bits before splitting into multiple users.
  if (SDValue V = simplifyI24(N, DCI))
    return V;
3477 SDValue N0 = N->getOperand(0);
3478 SDValue N1 = N->getOperand(1);
3480 bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);
3482 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3483 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
  SDLoc SL(N);

  SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3488 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
  return DAG.getMergeValues({ MulLo, MulHi }, SL);
}
3492 static bool isNegativeOne(SDValue Val) {
3493 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
    return C->isAllOnesValue();
  return false;
}
SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
                                          SDValue Op,
                                          const SDLoc &DL,
                                          unsigned Opc) const {
3502 EVT VT = Op.getValueType();
3503 EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
  if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
                              LegalVT != MVT::i16))
    return SDValue();

  if (VT != MVT::i32)
    Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);

  SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
  if (VT != MVT::i32)
    FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);

  return FFBX;
}
3518 // The native instructions return -1 on 0 input. Optimize out a select that
3519 // produces -1 on 0.
3521 // TODO: If zero is not undef, we could also do this if the output is compared
3522 // against the bitwidth.
3524 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3525 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3526 SDValue LHS, SDValue RHS,
3527 DAGCombinerInfo &DCI) const {
3528 ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
  if (!CmpRhs || !CmpRhs->isNullValue())
    return SDValue();
3532 SelectionDAG &DAG = DCI.DAG;
3533 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3534 SDValue CmpLHS = Cond.getOperand(0);
3536 unsigned Opc = isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 :
3537 AMDGPUISD::FFBH_U32;
3539 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3540 // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
3541 if (CCOpcode == ISD::SETEQ &&
3542 (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3543 RHS.getOperand(0) == CmpLHS &&
3544 isNegativeOne(LHS)) {
3545 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3548 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3549 // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
  if (CCOpcode == ISD::SETNE &&
      (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
      LHS.getOperand(0) == CmpLHS &&
      isNegativeOne(RHS)) {
    // For this pattern the ctlz/cttz is on the LHS, so derive the opcode from
    // it rather than from RHS.
    unsigned OpcLHS = isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32
                                                 : AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, OpcLHS);
  }

  return SDValue();
}
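// Worked example of the combine above (an IR-level sketch, for exposition):
//
//   %z = call i32 @llvm.ctlz.i32(i32 %x, i1 true)   ; undefined on zero input
//   %c = icmp eq i32 %x, 0
//   %r = select i1 %c, i32 -1, i32 %z
//
// collapses to a single FFBH_U32, since the hardware instruction already
// returns -1 for a zero input.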
static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
                                         unsigned Op, const SDLoc &SL,
                                         SDValue Cond, SDValue N1,
                                         SDValue N2) {
  SelectionDAG &DAG = DCI.DAG;
3567 EVT VT = N1.getValueType();
3569 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3570 N1.getOperand(0), N2.getOperand(0));
3571 DCI.AddToWorklist(NewSelect.getNode());
  return DAG.getNode(Op, SL, VT, NewSelect);
}
3575 // Pull a free FP operation out of a select so it may fold into uses.
3577 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3578 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3580 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3581 // select c, (fabs x), +k -> fabs (select c, x, k)
static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
                                    SDValue N) {
  SelectionDAG &DAG = DCI.DAG;
3585 SDValue Cond = N.getOperand(0);
3586 SDValue LHS = N.getOperand(1);
3587 SDValue RHS = N.getOperand(2);
3589 EVT VT = N.getValueType();
3590 if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3591 (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3592 return distributeOpThroughSelect(DCI, LHS.getOpcode(),
                                     SDLoc(N), Cond, LHS, RHS);
  }

  bool Inv = false;
  if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
    std::swap(LHS, RHS);
    Inv = true;
  }
3602 // TODO: Support vector constants.
3603 ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
  if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
    SDLoc SL(N);
    // If one side is an fneg/fabs and the other is a constant, we can push the
    // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3608 SDValue NewLHS = LHS.getOperand(0);
3609 SDValue NewRHS = RHS;
3611 // Careful: if the neg can be folded up, don't try to pull it back down.
3612 bool ShouldFoldNeg = true;
3614 if (NewLHS.hasOneUse()) {
3615 unsigned Opc = NewLHS.getOpcode();
3616 if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3617 ShouldFoldNeg = false;
3618 if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3619 ShouldFoldNeg = false;
3622 if (ShouldFoldNeg) {
3623 if (LHS.getOpcode() == ISD::FNEG)
3624 NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
      else if (CRHS->isNegative())
        return SDValue();

      if (Inv)
        std::swap(NewLHS, NewRHS);
3631 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3632 Cond, NewLHS, NewRHS);
3633 DCI.AddToWorklist(NewSelect.getNode());
      return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
    }
  }

  return SDValue();
}
3642 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3643 DAGCombinerInfo &DCI) const {
  if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
    return Folded;
3647 SDValue Cond = N->getOperand(0);
  if (Cond.getOpcode() != ISD::SETCC)
    return SDValue();
3651 EVT VT = N->getValueType(0);
3652 SDValue LHS = Cond.getOperand(0);
3653 SDValue RHS = Cond.getOperand(1);
3654 SDValue CC = Cond.getOperand(2);
3656 SDValue True = N->getOperand(1);
3657 SDValue False = N->getOperand(2);
3659 if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3660 SelectionDAG &DAG = DCI.DAG;
3661 if (DAG.isConstantValueOfAnyType(True) &&
3662 !DAG.isConstantValueOfAnyType(False)) {
3663 // Swap cmp + select pair to move constant to false input.
3664 // This will allow using VOPC cndmasks more often.
      // select (setcc x, y), k, x -> select (setccinv x, y), x, k
      SDLoc SL(N);
      ISD::CondCode NewCC = getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
3669 LHS.getValueType().isInteger());
3671 SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3672 return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
    }

    if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
      SDValue MinMax
        = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
      // Revisit this node so we can catch min3/max3/med3 patterns.
      //DCI.AddToWorklist(MinMax.getNode());
      if (MinMax)
        return MinMax;
    }
  }
  // There's no reason not to do this if the condition has other uses.
  return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
}
3688 static bool isInv2Pi(const APFloat &APF) {
3689 static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
3690 static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
3691 static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
3693 return APF.bitwiseIsEqual(KF16) ||
3694 APF.bitwiseIsEqual(KF32) ||
         APF.bitwiseIsEqual(KF64);
}
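// For reference (exposition only): 0x3118, 0x3e22f983 and 0x3fc45f306dc9c882
// are the half, single and double precision bit patterns of
// 1.0 / (2.0 * pi) ~= 0.15915494, the value these comparisons recognize.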
// +0.0 and 1.0 / (2.0 * pi) have inline immediates, but their negated values
// do not, so there is an additional cost to negate them.
3700 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
3701 if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
    if (C->isZero() && !C->isNegative())
      return true;

    if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
      return true;
  }

  return false;
}
static unsigned inverseMinMax(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return ISD::FMINNUM;
  case ISD::FMINNUM:
    return ISD::FMAXNUM;
3718 case ISD::FMAXNUM_IEEE:
3719 return ISD::FMINNUM_IEEE;
3720 case ISD::FMINNUM_IEEE:
3721 return ISD::FMAXNUM_IEEE;
3722 case AMDGPUISD::FMAX_LEGACY:
3723 return AMDGPUISD::FMIN_LEGACY;
3724 case AMDGPUISD::FMIN_LEGACY:
3725 return AMDGPUISD::FMAX_LEGACY;
  default:
    llvm_unreachable("invalid min/max opcode");
  }
}
3731 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3732 DAGCombinerInfo &DCI) const {
3733 SelectionDAG &DAG = DCI.DAG;
3734 SDValue N0 = N->getOperand(0);
3735 EVT VT = N->getValueType(0);
3737 unsigned Opc = N0.getOpcode();
  // If the input has multiple uses and we can either fold the negate down or
  // the other uses cannot absorb it, give up. This both prevents unprofitable
  // transformations and infinite loops: we won't repeatedly try to fold around
  // a negate that has no 'good' form.
3743 if (N0.hasOneUse()) {
3744 // This may be able to fold into the source, but at a code size cost. Don't
3745 // fold if the fold into the user is free.
    if (allUsesHaveSourceMods(N, 0))
      return SDValue();
  } else {
    if (fnegFoldsIntoOp(Opc) &&
        (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
      return SDValue();
  }

  SDLoc SL(N);
  switch (Opc) {
  case ISD::FADD: {
    if (!mayIgnoreSignedZero(N0))
      return SDValue();
3760 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
3761 SDValue LHS = N0.getOperand(0);
3762 SDValue RHS = N0.getOperand(1);
3764 if (LHS.getOpcode() != ISD::FNEG)
3765 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
    else
      LHS = LHS.getOperand(0);
3769 if (RHS.getOpcode() != ISD::FNEG)
3770 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    else
      RHS = RHS.getOperand(0);
3774 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
3775 if (Res.getOpcode() != ISD::FADD)
3776 return SDValue(); // Op got folded away.
3777 if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMUL:
3782 case AMDGPUISD::FMUL_LEGACY: {
3783 // (fneg (fmul x, y)) -> (fmul x, (fneg y))
3784 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
3785 SDValue LHS = N0.getOperand(0);
3786 SDValue RHS = N0.getOperand(1);
3788 if (LHS.getOpcode() == ISD::FNEG)
3789 LHS = LHS.getOperand(0);
3790 else if (RHS.getOpcode() == ISD::FNEG)
      RHS = RHS.getOperand(0);
    else
3793 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3795 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
3796 if (Res.getOpcode() != Opc)
3797 return SDValue(); // Op got folded away.
3798 if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMA:
  case ISD::FMAD: {
    if (!mayIgnoreSignedZero(N0))
      return SDValue();
3807 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
3808 SDValue LHS = N0.getOperand(0);
3809 SDValue MHS = N0.getOperand(1);
3810 SDValue RHS = N0.getOperand(2);
3812 if (LHS.getOpcode() == ISD::FNEG)
3813 LHS = LHS.getOperand(0);
3814 else if (MHS.getOpcode() == ISD::FNEG)
      MHS = MHS.getOperand(0);
    else
      MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
3819 if (RHS.getOpcode() != ISD::FNEG)
3820 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    else
      RHS = RHS.getOperand(0);
3824 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
3825 if (Res.getOpcode() != Opc)
3826 return SDValue(); // Op got folded away.
3827 if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINNUM_IEEE:
3835 case AMDGPUISD::FMAX_LEGACY:
3836 case AMDGPUISD::FMIN_LEGACY: {
3837 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
3838 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
3839 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
3840 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
3842 SDValue LHS = N0.getOperand(0);
3843 SDValue RHS = N0.getOperand(1);
3845 // 0 doesn't have a negated inline immediate.
3846 // TODO: This constant check should be generalized to other operations.
    if (isConstantCostlierToNegate(RHS))
      return SDValue();
3850 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3851 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3852 unsigned Opposite = inverseMinMax(Opc);
3854 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
3855 if (Res.getOpcode() != Opposite)
3856 return SDValue(); // Op got folded away.
3857 if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case AMDGPUISD::FMED3: {
    SDValue Ops[3];
    for (unsigned I = 0; I < 3; ++I)
3864 Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
3866 SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
3867 if (Res.getOpcode() != AMDGPUISD::FMED3)
3868 return SDValue(); // Op got folded away.
3869 if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FP_EXTEND:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT: // XXX - Should fround be handled?
  case ISD::FSIN:
  case ISD::FCANONICALIZE:
3879 case AMDGPUISD::RCP:
3880 case AMDGPUISD::RCP_LEGACY:
3881 case AMDGPUISD::RCP_IFLAG:
3882 case AMDGPUISD::SIN_HW: {
3883 SDValue CvtSrc = N0.getOperand(0);
3884 if (CvtSrc.getOpcode() == ISD::FNEG) {
3885 // (fneg (fp_extend (fneg x))) -> (fp_extend x)
3886 // (fneg (rcp (fneg x))) -> (rcp x)
      return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
    }

    if (!N0.hasOneUse())
      return SDValue();
3893 // (fneg (fp_extend x)) -> (fp_extend (fneg x))
3894 // (fneg (rcp x)) -> (rcp (fneg x))
3895 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
    return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
  }
3898 case ISD::FP_ROUND: {
3899 SDValue CvtSrc = N0.getOperand(0);
3901 if (CvtSrc.getOpcode() == ISD::FNEG) {
3902 // (fneg (fp_round (fneg x))) -> (fp_round x)
3903 return DAG.getNode(ISD::FP_ROUND, SL, VT,
                         CvtSrc.getOperand(0), N0.getOperand(1));
    }

    if (!N0.hasOneUse())
      return SDValue();
3910 // (fneg (fp_round x)) -> (fp_round (fneg x))
3911 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
    return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
  }
3914 case ISD::FP16_TO_FP: {
3915 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
3916 // f16, but legalization of f16 fneg ends up pulling it out of the source.
3917 // Put the fneg back as a legal source operation that can be matched later.
3920 SDValue Src = N0.getOperand(0);
3921 EVT SrcVT = Src.getValueType();
3923 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
3924 SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
3925 DAG.getConstant(0x8000, SL, SrcVT));
    return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
  }
  default:
    return SDValue();
  }
}
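// Bit-level models of the integer rewrites used here and in
// performFAbsCombine below when f16 is illegal (exposition only; the helper
// names are hypothetical):
LLVM_ATTRIBUTE_UNUSED
static uint16_t modelFNegF16Bits(uint16_t Bits) {
  return Bits ^ 0x8000; // fneg: flip the sign bit
}
LLVM_ATTRIBUTE_UNUSED
static uint16_t modelFAbsF16Bits(uint16_t Bits) {
  return Bits & 0x7fff; // fabs: clear the sign bit
}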
3933 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
3934 DAGCombinerInfo &DCI) const {
3935 SelectionDAG &DAG = DCI.DAG;
3936 SDValue N0 = N->getOperand(0);
  if (!N0.hasOneUse())
    return SDValue();
3941 switch (N0.getOpcode()) {
3942 case ISD::FP16_TO_FP: {
    assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
    SDLoc SL(N);
    SDValue Src = N0.getOperand(0);
3946 EVT SrcVT = Src.getValueType();
3948 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
3949 SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
3950 DAG.getConstant(0x7fff, SL, SrcVT));
    return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
  }
  default:
    return SDValue();
  }
}
3958 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
3959 DAGCombinerInfo &DCI) const {
  const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
  if (!CFP)
    return SDValue();
3964 // XXX - Should this flush denormals?
3965 const APFloat &Val = CFP->getValueAPF();
3966 APFloat One(Val.getSemantics(), "1.0");
  return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
}
3970 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3971 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  default:
    break;
3978 case ISD::BITCAST: {
3979 EVT DestVT = N->getValueType(0);
3981 // Push casts through vector builds. This helps avoid emitting a large
3982 // number of copies when materializing floating point vector constants.
3984 // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
3985 // vnt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
3986 if (DestVT.isVector()) {
3987 SDValue Src = N->getOperand(0);
3988 if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3989 EVT SrcVT = Src.getValueType();
3990 unsigned NElts = DestVT.getVectorNumElements();
3992 if (SrcVT.getVectorNumElements() == NElts) {
3993 EVT DestEltVT = DestVT.getVectorElementType();
          SmallVector<SDValue, 8> CastedElts;
          SDLoc SL(N);
          for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3998 SDValue Elt = Src.getOperand(I);
3999 CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
          }

          return DAG.getBuildVector(DestVT, SL, CastedElts);
        }
      }
    }
    if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
      break;
4010 // Fold bitcasts of constants.
4012 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
4013 // TODO: Generalize and move to DAGCombiner
4014 SDValue Src = N->getOperand(0);
4015 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
      if (Src.getValueType() == MVT::i64) {
        SDLoc SL(N);
        uint64_t CVal = C->getZExtValue();
4019 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4020 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4021 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
        return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
      }
    }
4026 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
      const APInt &Val = C->getValueAPF().bitcastToAPInt();
      SDLoc SL(N);
      uint64_t CVal = Val.getZExtValue();
4030 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4031 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4032 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
      return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
    }

    break;
  }
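  // Worked example of the constant split above (illustrative): bitcasting the
  // i64 constant 0x1122334455667788 to v2i32 yields
  // build_vector 0x55667788, 0x11223344 (low word in element 0).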
  case ISD::SHL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performShlCombine(N, DCI);
  }
  case ISD::SRL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSrlCombine(N, DCI);
  }
  case ISD::SRA: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSraCombine(N, DCI);
  }
  case ISD::TRUNCATE:
    return performTruncateCombine(N, DCI);
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case ISD::MULHS:
    return performMulhsCombine(N, DCI);
  case ISD::MULHU:
    return performMulhuCombine(N, DCI);
4065 case AMDGPUISD::MUL_I24:
4066 case AMDGPUISD::MUL_U24:
4067 case AMDGPUISD::MULHI_I24:
4068 case AMDGPUISD::MULHI_U24: {
    if (SDValue V = simplifyI24(N, DCI))
      return V;
    return SDValue();
  }
4073 case AMDGPUISD::MUL_LOHI_I24:
4074 case AMDGPUISD::MUL_LOHI_U24:
4075 return performMulLoHi24Combine(N, DCI);
  case ISD::SELECT:
    return performSelectCombine(N, DCI);
  case ISD::FNEG:
    return performFNegCombine(N, DCI);
  case ISD::FABS:
    return performFAbsCombine(N, DCI);
4082 case AMDGPUISD::BFE_I32:
4083 case AMDGPUISD::BFE_U32: {
4084 assert(!N->getValueType(0).isVector() &&
4085 "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, DL, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;
4098 SDValue BitsFrom = N->getOperand(0);
4099 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
4101 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
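    // Reference semantics of BFE for the folds below (illustrative): for
    // Width != 0,
    //   bfe_u32(Src, Offset, Width) = (Src >> Offset) & ((1u << Width) - 1)
    //   bfe_i32(Src, Offset, Width) = the same field, sign-extended
    // e.g. bfe_u32(0xabcd1234, 8, 8) == 0x12.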
4103 if (OffsetVal == 0) {
4104 // This is already sign / zero extended, so try to fold away extra BFEs.
4105 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
4107 unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of
        // existing DAG combines. If not eliminated, we will match back to BFE
        // during selection.

        // TODO: The sext_inreg of extended types currently ends up split into
        // multiple operations, although we could handle it in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }
    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG, CVal->getSExtValue(), OffsetVal,
                                        WidthVal, DL);
      }

      return constantFoldBFE<uint32_t>(DAG, CVal->getZExtValue(), OffsetVal,
                                       WidthVal, DL);
    }
4142 if ((OffsetVal + WidthVal) >= 32 &&
4143 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
4144 SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
4145 return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
4146 BitsFrom, ShiftVal);
4149 if (BitsFrom.hasOneUse()) {
      APInt Demanded = APInt::getBitsSet(32, OffsetVal,
                                         OffsetVal + WidthVal);

      KnownBits Known;
4155 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
4156 !DCI.isBeforeLegalizeOps());
4157 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4158 if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
4159 TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);
        return SDValue(N, 0);
      }
    }

    break;
  }
  case ISD::LOAD:
4167 return performLoadCombine(N, DCI);
  case ISD::STORE:
    return performStoreCombine(N, DCI);
4170 case AMDGPUISD::RCP:
4171 case AMDGPUISD::RCP_IFLAG:
4172 return performRcpCombine(N, DCI);
4173 case ISD::AssertZext:
4174 case ISD::AssertSext:
    return performAssertSZExtCombine(N, DCI);
  }

  return SDValue();
}
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
4184 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
4185 const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT,
                                                   const SDLoc &SL,
                                                   bool RawReg) const {
4189 MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned VReg;
  if (!MRI.isLiveIn(Reg)) {
4194 VReg = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VReg);
  } else {
    VReg = MRI.getLiveInVirtReg(Reg);
  }

  if (RawReg)
    return DAG.getRegister(VReg, VT);
  return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
}
4206 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
                                                  EVT VT,
                                                  const SDLoc &SL,
                                                  int64_t Offset) const {
4210 MachineFunction &MF = DAG.getMachineFunction();
4211 MachineFrameInfo &MFI = MF.getFrameInfo();
4213 int FI = MFI.CreateFixedObject(VT.getStoreSize(), Offset, true);
4214 auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
4215 SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
  return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4,
                     MachineMemOperand::MODereferenceable |
                     MachineMemOperand::MOInvariant);
}
4222 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   SDValue ArgVal,
                                                   int64_t Offset) const {
4227 MachineFunction &MF = DAG.getMachineFunction();
4228 MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
4230 SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
  SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4,
                               MachineMemOperand::MODereferenceable);
  return Store;
}
4236 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
4237 const TargetRegisterClass *RC,
4238 EVT VT, const SDLoc &SL,
4239 const ArgDescriptor &Arg) const {
4240 assert(Arg && "Attempting to load missing argument");
4242 SDValue V = Arg.isRegister() ?
4243 CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
4244 loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
  if (!Arg.isMasked())
    return V;
4249 unsigned Mask = Arg.getMask();
4250 unsigned Shift = countTrailingZeros<unsigned>(Mask);
4251 V = DAG.getNode(ISD::SRL, SL, VT, V,
4252 DAG.getShiftAmountConstant(Shift, VT, SL));
  return DAG.getNode(ISD::AND, SL, VT, V,
                     DAG.getConstant(Mask >> Shift, SL, VT));
}
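// Example of the masked path above (illustrative values): if Arg.getMask() is
// 0x3ff00000, then Shift is 20 and the loaded value becomes
// (V >> 20) & 0x3ff, i.e. a 10-bit field extracted from the packed input.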
4257 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
4258 const MachineFunction &MF, const ImplicitParameter Param) const {
4259 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
4260 const AMDGPUSubtarget &ST =
4261 AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
4262 unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
4263 unsigned Alignment = ST.getAlignmentForImplicitArgPtr();
  uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
                       ExplicitArgOffset;

  switch (Param) {
  case GRID_DIM:
    return ArgOffset;
  case GRID_OFFSET:
    return ArgOffset + 4;
  }

  llvm_unreachable("unexpected implicit parameter type");
}
4275 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
4277 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
4278 switch ((AMDGPUISD::NodeType)Opcode) {
4279 case AMDGPUISD::FIRST_NUMBER: break;
4281 NODE_NAME_CASE(UMUL);
4282 NODE_NAME_CASE(BRANCH_COND);
4286 NODE_NAME_CASE(ELSE)
4287 NODE_NAME_CASE(LOOP)
4288 NODE_NAME_CASE(CALL)
4289 NODE_NAME_CASE(TC_RETURN)
4290 NODE_NAME_CASE(TRAP)
4291 NODE_NAME_CASE(RET_FLAG)
4292 NODE_NAME_CASE(RETURN_TO_EPILOG)
4293 NODE_NAME_CASE(ENDPGM)
4294 NODE_NAME_CASE(DWORDADDR)
4295 NODE_NAME_CASE(FRACT)
4296 NODE_NAME_CASE(SETCC)
4297 NODE_NAME_CASE(SETREG)
4298 NODE_NAME_CASE(FMA_W_CHAIN)
4299 NODE_NAME_CASE(FMUL_W_CHAIN)
4300 NODE_NAME_CASE(CLAMP)
4301 NODE_NAME_CASE(COS_HW)
4302 NODE_NAME_CASE(SIN_HW)
4303 NODE_NAME_CASE(FMAX_LEGACY)
4304 NODE_NAME_CASE(FMIN_LEGACY)
4305 NODE_NAME_CASE(FMAX3)
4306 NODE_NAME_CASE(SMAX3)
4307 NODE_NAME_CASE(UMAX3)
4308 NODE_NAME_CASE(FMIN3)
4309 NODE_NAME_CASE(SMIN3)
4310 NODE_NAME_CASE(UMIN3)
4311 NODE_NAME_CASE(FMED3)
4312 NODE_NAME_CASE(SMED3)
4313 NODE_NAME_CASE(UMED3)
4314 NODE_NAME_CASE(FDOT2)
4315 NODE_NAME_CASE(URECIP)
4316 NODE_NAME_CASE(DIV_SCALE)
4317 NODE_NAME_CASE(DIV_FMAS)
4318 NODE_NAME_CASE(DIV_FIXUP)
4319 NODE_NAME_CASE(FMAD_FTZ)
4320 NODE_NAME_CASE(TRIG_PREOP)
4323 NODE_NAME_CASE(RCP_LEGACY)
4324 NODE_NAME_CASE(RSQ_LEGACY)
4325 NODE_NAME_CASE(RCP_IFLAG)
4326 NODE_NAME_CASE(FMUL_LEGACY)
4327 NODE_NAME_CASE(RSQ_CLAMP)
4328 NODE_NAME_CASE(LDEXP)
4329 NODE_NAME_CASE(FP_CLASS)
4330 NODE_NAME_CASE(DOT4)
4331 NODE_NAME_CASE(CARRY)
4332 NODE_NAME_CASE(BORROW)
4333 NODE_NAME_CASE(BFE_U32)
4334 NODE_NAME_CASE(BFE_I32)
4337 NODE_NAME_CASE(FFBH_U32)
4338 NODE_NAME_CASE(FFBH_I32)
4339 NODE_NAME_CASE(FFBL_B32)
4340 NODE_NAME_CASE(MUL_U24)
4341 NODE_NAME_CASE(MUL_I24)
4342 NODE_NAME_CASE(MULHI_U24)
4343 NODE_NAME_CASE(MULHI_I24)
4344 NODE_NAME_CASE(MUL_LOHI_U24)
4345 NODE_NAME_CASE(MUL_LOHI_I24)
4346 NODE_NAME_CASE(MAD_U24)
4347 NODE_NAME_CASE(MAD_I24)
4348 NODE_NAME_CASE(MAD_I64_I32)
4349 NODE_NAME_CASE(MAD_U64_U32)
4350 NODE_NAME_CASE(PERM)
4351 NODE_NAME_CASE(TEXTURE_FETCH)
4352 NODE_NAME_CASE(EXPORT)
4353 NODE_NAME_CASE(EXPORT_DONE)
4354 NODE_NAME_CASE(R600_EXPORT)
4355 NODE_NAME_CASE(CONST_ADDRESS)
4356 NODE_NAME_CASE(REGISTER_LOAD)
4357 NODE_NAME_CASE(REGISTER_STORE)
4358 NODE_NAME_CASE(SAMPLE)
4359 NODE_NAME_CASE(SAMPLEB)
4360 NODE_NAME_CASE(SAMPLED)
4361 NODE_NAME_CASE(SAMPLEL)
4362 NODE_NAME_CASE(CVT_F32_UBYTE0)
4363 NODE_NAME_CASE(CVT_F32_UBYTE1)
4364 NODE_NAME_CASE(CVT_F32_UBYTE2)
4365 NODE_NAME_CASE(CVT_F32_UBYTE3)
4366 NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
4367 NODE_NAME_CASE(CVT_PKNORM_I16_F32)
4368 NODE_NAME_CASE(CVT_PKNORM_U16_F32)
4369 NODE_NAME_CASE(CVT_PK_I16_I32)
4370 NODE_NAME_CASE(CVT_PK_U16_U32)
4371 NODE_NAME_CASE(FP_TO_FP16)
4372 NODE_NAME_CASE(FP16_ZEXT)
4373 NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4374 NODE_NAME_CASE(CONST_DATA_PTR)
4375 NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4377 NODE_NAME_CASE(KILL)
4378 NODE_NAME_CASE(DUMMY_CHAIN)
4379 case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4380 NODE_NAME_CASE(INIT_EXEC)
4381 NODE_NAME_CASE(INIT_EXEC_FROM_INPUT)
4382 NODE_NAME_CASE(SENDMSG)
4383 NODE_NAME_CASE(SENDMSGHALT)
4384 NODE_NAME_CASE(INTERP_MOV)
4385 NODE_NAME_CASE(INTERP_P1)
4386 NODE_NAME_CASE(INTERP_P2)
4387 NODE_NAME_CASE(INTERP_P1LL_F16)
4388 NODE_NAME_CASE(INTERP_P1LV_F16)
4389 NODE_NAME_CASE(INTERP_P2_F16)
4390 NODE_NAME_CASE(LOAD_D16_HI)
4391 NODE_NAME_CASE(LOAD_D16_LO)
4392 NODE_NAME_CASE(LOAD_D16_HI_I8)
4393 NODE_NAME_CASE(LOAD_D16_HI_U8)
4394 NODE_NAME_CASE(LOAD_D16_LO_I8)
4395 NODE_NAME_CASE(LOAD_D16_LO_U8)
4396 NODE_NAME_CASE(STORE_MSKOR)
4397 NODE_NAME_CASE(LOAD_CONSTANT)
4398 NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4399 NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4400 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4401 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4402 NODE_NAME_CASE(DS_ORDERED_COUNT)
4403 NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4404 NODE_NAME_CASE(ATOMIC_INC)
4405 NODE_NAME_CASE(ATOMIC_DEC)
4406 NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4407 NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4408 NODE_NAME_CASE(BUFFER_LOAD)
4409 NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
4410 NODE_NAME_CASE(BUFFER_LOAD_USHORT)
4411 NODE_NAME_CASE(BUFFER_LOAD_BYTE)
4412 NODE_NAME_CASE(BUFFER_LOAD_SHORT)
4413 NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4414 NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4415 NODE_NAME_CASE(SBUFFER_LOAD)
4416 NODE_NAME_CASE(BUFFER_STORE)
4417 NODE_NAME_CASE(BUFFER_STORE_BYTE)
4418 NODE_NAME_CASE(BUFFER_STORE_SHORT)
4419 NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4420 NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4421 NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4422 NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4423 NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4424 NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4425 NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4426 NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4427 NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4428 NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4429 NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4430 NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4431 NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4432 NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
4433 NODE_NAME_CASE(BUFFER_ATOMIC_PK_FADD)
4434 NODE_NAME_CASE(ATOMIC_FADD)
4435 NODE_NAME_CASE(ATOMIC_PK_FADD)
  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
  }

  return nullptr;
}
4442 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4443 SelectionDAG &DAG, int Enabled,
4444 int &RefinementSteps,
4445 bool &UseOneConstNR,
4446 bool Reciprocal) const {
4447 EVT VT = Operand.getValueType();
4449 if (VT == MVT::f32) {
4450 RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }
  // TODO: There is also an f64 rsq instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}
4460 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4461 SelectionDAG &DAG, int Enabled,
4462 int &RefinementSteps) const {
4463 EVT VT = Operand.getValueType();
4465 if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson step performed with two fused multiply-adds (FMAs).

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }
  // TODO: There is also an f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}
4481 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
4482 const SDValue Op, KnownBits &Known,
4483 const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
4485 Known.resetAll(); // Don't know anything.
  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
4492 case AMDGPUISD::CARRY:
4493 case AMDGPUISD::BORROW: {
    Known.Zero = APInt::getHighBitsSet(32, 31); // carry/borrow is a single bit
    break;
  }
4498 case AMDGPUISD::BFE_I32:
4499 case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;
4504 uint32_t Width = CWidth->getZExtValue() & 0x1f;
4506 if (Opc == AMDGPUISD::BFE_U32)
      Known.Zero = APInt::getHighBitsSet(32, 32 - Width);

    break;
  }
4511 case AMDGPUISD::FP_TO_FP16:
4512 case AMDGPUISD::FP16_ZEXT: {
4513 unsigned BitWidth = Known.getBitWidth();
4515 // High bits are zero.
    Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
    break;
  }
4519 case AMDGPUISD::MUL_U24:
4520 case AMDGPUISD::MUL_I24: {
4521 KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4522 KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4523 unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
4524 RHSKnown.countMinTrailingZeros();
4525 Known.Zero.setLowBits(std::min(TrailZ, 32u));
4527 // Truncate to 24 bits.
4528 LHSKnown = LHSKnown.trunc(24);
4529 RHSKnown = RHSKnown.trunc(24);
4531 bool Negative = false;
4532 if (Opc == AMDGPUISD::MUL_I24) {
4533 unsigned LHSValBits = 24 - LHSKnown.countMinSignBits();
4534 unsigned RHSValBits = 24 - RHSKnown.countMinSignBits();
4535 unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
      if (MaxValBits >= 32)
        break;
4538 bool LHSNegative = LHSKnown.isNegative();
4539 bool LHSPositive = LHSKnown.isNonNegative();
4540 bool RHSNegative = RHSKnown.isNegative();
4541 bool RHSPositive = RHSKnown.isNonNegative();
      if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive))
        break;
      Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative);
      if (Negative)
        Known.One.setHighBits(32 - MaxValBits);
      else
        Known.Zero.setHighBits(32 - MaxValBits);
    } else {
4550 unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros();
4551 unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros();
4552 unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
      if (MaxValBits >= 32)
        break;
      Known.Zero.setHighBits(32 - MaxValBits);
    }
    break;
  }
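  // Worked example for the unsigned path above (illustrative): if the 24-bit
  // LHS has at most 8 significant bits and the RHS at most 4, the product
  // needs at most 12 bits, so the top 20 bits of the 32-bit result are known
  // zero.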
4559 case AMDGPUISD::PERM: {
    ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CMask)
      return;
4564 KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4565 KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4566 unsigned Sel = CMask->getZExtValue();
4568 for (unsigned I = 0; I < 32; I += 8) {
      unsigned SelBits = Sel & 0xff;
      if (SelBits < 4) {
        SelBits *= 8;
        Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4573 Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4574 } else if (SelBits < 7) {
4575 SelBits = (SelBits & 3) * 8;
4576 Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
4577 Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
4578 } else if (SelBits == 0x0c) {
        Known.Zero |= 0xffull << I;
      } else if (SelBits > 0x0c) {
        Known.One |= 0xffull << I;
      }
      Sel >>= 8;
    }
    break;
  }
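  // Reference semantics of the PERM selector bytes handled above
  // (illustrative): selector byte k in 0-3 picks byte k of Src1, k in 4-6
  // picks byte (k & 3) of Src0, k == 0x0c produces 0x00, and k > 0x0c
  // produces 0xff; other selector values are left unknown here.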
4587 case AMDGPUISD::BUFFER_LOAD_UBYTE: {
    Known.Zero.setHighBits(24);
    break;
  }
4591 case AMDGPUISD::BUFFER_LOAD_USHORT: {
    Known.Zero.setHighBits(16);
    break;
  }
4595 case AMDGPUISD::LDS: {
4596 auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
4597 unsigned Align = GA->getGlobal()->getAlignment();
4599 Known.Zero.setHighBits(16);
    if (Align)
      Known.Zero.setLowBits(Log2_32(Align));
    break;
  }
4604 case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (IID) {
4607 case Intrinsic::amdgcn_mbcnt_lo:
4608 case Intrinsic::amdgcn_mbcnt_hi: {
4609 const GCNSubtarget &ST =
4610 DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
4611 // These return at most the wavefront size - 1.
4612 unsigned Size = Op.getValueType().getSizeInBits();
      Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2());
      break;
    }
    default:
      break;
    }
    break;
  }
  }
}
4623 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
4624 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4625 unsigned Depth) const {
4626 switch (Op.getOpcode()) {
4627 case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;
4632 unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;
4636 // TODO: Could probably figure something out with non-0 offsets.
4637 unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }
4641 case AMDGPUISD::BFE_U32: {
4642 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
    return 31;
  case AMDGPUISD::BUFFER_LOAD_BYTE:
    return 25;
  case AMDGPUISD::BUFFER_LOAD_SHORT:
    return 17;
  case AMDGPUISD::BUFFER_LOAD_UBYTE:
    return 24;
  case AMDGPUISD::BUFFER_LOAD_USHORT:
    return 16;
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT:
    return 16;
  default:
    return 1;
  }
}
4665 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
4666 const SelectionDAG &DAG,
                                                        bool SNaN,
                                                        unsigned Depth) const {
  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
4671 case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (SNaN)
      return true;

    // TODO: Can check no nans on one of the operands for each one, but which
    // one?
    return false;
  }
4680 case AMDGPUISD::FMUL_LEGACY:
4681 case AMDGPUISD::CVT_PKRTZ_F16_F32: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
4687 case AMDGPUISD::FMED3:
4688 case AMDGPUISD::FMIN3:
4689 case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMAD_FTZ: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  }
4697 case AMDGPUISD::CVT_F32_UBYTE0:
4698 case AMDGPUISD::CVT_F32_UBYTE1:
4699 case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return true;
4703 case AMDGPUISD::RCP:
4704 case AMDGPUISD::RSQ:
4705 case AMDGPUISD::RCP_LEGACY:
4706 case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP: {
    if (SNaN)
      return true;

    // TODO: Need a known-positive check.
    return false;
  }
4714 case AMDGPUISD::LDEXP:
4715 case AMDGPUISD::FRACT: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
4720 case AMDGPUISD::DIV_SCALE:
4721 case AMDGPUISD::DIV_FMAS:
4722 case AMDGPUISD::DIV_FIXUP:
4723 case AMDGPUISD::TRIG_PREOP:
    // TODO: Refine on operands.
    return SNaN;
4726 case AMDGPUISD::SIN_HW:
4727 case AMDGPUISD::COS_HW: {
    // TODO: Need a check for infinity.
    return SNaN;
  }
4731 case ISD::INTRINSIC_WO_CHAIN: {
4732 unsigned IntrinsicID
4733 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4734 // TODO: Handle more intrinsics
4735 switch (IntrinsicID) {
    case Intrinsic::amdgcn_cubeid:
      return true;

    case Intrinsic::amdgcn_frexp_mant: {
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
    }
    case Intrinsic::amdgcn_cvt_pkrtz: {
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
             DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
    }
    case Intrinsic::amdgcn_fdot2:
      // TODO: Refine on operand
      return SNaN;
    default:
      return false;
    }
  }
  default:
    return false;
  }
}
4762 TargetLowering::AtomicExpansionKind
4763 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
4764 switch (RMW->getOperation()) {
4765 case AtomicRMWInst::Nand:
4766 case AtomicRMWInst::FAdd:
4767 case AtomicRMWInst::FSub:
    return AtomicExpansionKind::CmpXChg;
  default:
    return AtomicExpansionKind::None;
  }
}
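// For the operations above, AtomicExpansionKind::CmpXChg turns the atomicrmw
// into a compare-exchange loop, roughly (IR-level sketch, for exposition):
//
//   loop:
//     %old = phi [ %init, %entry ], [ %prev, %loop ]
//     %new = <op> %old, %val
//     %pair = cmpxchg ptr %p, %old, %new seq_cst seq_cst
//     %prev = extractvalue { T, i1 } %pair, 0
//     %ok = extractvalue { T, i1 } %pair, 1
//     br i1 %ok, label %done, label %loop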