//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUISelLowering.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600MachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
static bool allocateKernArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  MachineFunction &MF = State.getMachineFunction();
  AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

  uint64_t Offset = MFI->allocateKernArg(LocVT.getStoreSize(),
                                         ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
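
// Illustrative example (not from the source): for a kernel taking
// (i32 %a, <4 x i32> %b), the first call above would place %a at offset 0,
// and the second would round up to the 16-byte alignment of <4 x i32>,
// placing %b at offset 16 rather than 4.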
static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           const TargetRegisterClass *RC,
                           unsigned NumRegs) {
  ArrayRef<MCPhysReg> RegList = makeArrayRef(RC->begin(), NumRegs);
  unsigned RegResult = State.AllocateReg(RegList);
  if (RegResult == AMDGPU::NoRegister)
    return false;

  State.addLoc(CCValAssign::getReg(ValNo, ValVT, RegResult, LocVT, LocInfo));
  return true;
}
static bool allocateSGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::SGPR_64RegClass, 20);
  }
  default:
    return false;
  }
}
// Allocate up to VGPR31.
//
// TODO: Since there are no VGPR alignment requirements, would it be better to
// split into individual scalar registers?
static bool allocateVGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_64RegClass, 31);
  }
  case MVT::v4i32:
  case MVT::v4f32:
  case MVT::v2i64:
  case MVT::v2f64: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_128RegClass, 29);
  }
  case MVT::v8i32:
  case MVT::v8f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_256RegClass, 25);
  }
  case MVT::v16i32:
  case MVT::v16f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_512RegClass, 17);
  }
  default:
    return false;
  }
}

#include "AMDGPUGenCallingConv.inc"
// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
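
// For example, v4i8 (32-bit store size) maps to i32, while v2i64 (128 bits)
// maps to v4i32; any store size above 32 bits is assumed to be a multiple
// of 32.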
bool AMDGPUTargetLowering::isOrEquivalentToAdd(SelectionDAG &DAG, SDValue Op)
  const {
  assert(Op.getOpcode() == ISD::OR);

  SDValue N0 = Op->getOperand(0);
  SDValue N1 = Op->getOperand(1);
  EVT VT = N0.getValueType();

  if (VT.isInteger() && !VT.isVector()) {
    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(N0, LHSKnown);

    if (LHSKnown.Zero.getBoolValue()) {
      DAG.computeKnownBits(N1, RHSKnown);

      if (!(~RHSKnown.Zero & ~LHSKnown.Zero))
        return true;
    }
  }

  return false;
}
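
// Example: with N0 = (X << 2) the low two bits of N0 are known zero, and with
// N1 = 3 every other bit of N1 is known zero, so each bit position has at
// least one known-zero operand, no carry can occur, and (N0 | 3) == (N0 + 3).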
AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
  : TargetLowering(TM), Subtarget(&STI) {
  AMDGPUASI = AMDGPU::getAMDGPUAS(TM);
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand.
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);

  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);
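
  // For reference, the 32-bit BFE being relied on here behaves roughly as:
  //   bfe_u32(src, offset, width) = (src >> offset) & ((1u << width) - 1)
  // i.e. one instruction extracts an arbitrary contiguous bitfield.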
  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }
  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }
  // This causes using an unrolled select operation rather than expansion with
  // bit operations. This is in general better, but the alternative using BFI
  // instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);
  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For now,
  // we don't have a way of knowing during instruction selection if a condition
  // will be uniform and we always use vector compares. Assume we are using
  // vector compares until that is fixed.
  setHasMultipleConditionRegisters(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3).
  setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
  // about these during lowering.
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;
  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FABS);
  setTargetDAGCombine(ISD::AssertZext);
  setTargetDAGCombine(ISD::AssertSext);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//
static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FNEARBYINT:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
    return true;
  default:
    return false;
  }
}
/// \returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}
// Most FP instructions support source modifiers, but this could be refined
// slightly.
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
  case AMDGPUISD::DIV_SCALE:
  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  default:
    return true;
  }
}
bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // it is truly free to use a source modifier in all cases. If there are
  // multiple users, but each one would require a VOP3 encoding anyway, there
  // will be a code size increase. Try to avoid increasing code size unless we
  // know it will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit number of uses to check?
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}
MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}
// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}
bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType,
                                                 EVT NewVT) const {
  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // matter.
  //
  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  return (LScalarSize < CastScalarSize) ||
         (CastScalarSize >= 32);
}
// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}
//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//
bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16) ||
         (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
}
bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // vector register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}
bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}
bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}
bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//
CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return CC_AMDGPU_Kernel;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
    return CC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}
CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return CC_AMDGPU_Kernel;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
    return RetCC_SI_Shader;
  case CallingConv::C:
  case CallingConv::Fast:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}
/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original types sizes
/// from the LLVM IR Function and fixup the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments()
///
/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
/// the argument before it was split. From this, we deduce that the memory type
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
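///
/// To make the v8i8 example concrete: each of the eight Ins entries carries
/// ArgVT == v8i8 and NumRegs == 8, so the vector-split branch below deduces
/// MemVT == i8, and allocateKernArg is then called with LocVT == i8 and the
/// register type Ins[x].VT as the ValVT.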
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(CCState &State,
                              const SmallVectorImpl<ISD::InputArg> &Ins) const {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    const ISD::InputArg &In = Ins[i];
    EVT MemVT;

    unsigned NumRegs = getNumRegisters(State.getContext(), In.ArgVT);

    if (!Subtarget->isAmdHsaOS() &&
        (In.ArgVT == MVT::i16 || In.ArgVT == MVT::i8 || In.ArgVT == MVT::f16)) {
      // The ABI says the caller will extend these values to 32-bits.
      MemVT = In.ArgVT.isInteger() ? MVT::i32 : MVT::f32;
    } else if (NumRegs == 1) {
      // This argument is not split, so the IR type is the memory type.
      assert(!In.Flags.isSplit());
      if (In.ArgVT.isExtended()) {
        // We have an extended type, like i24, so we should just use the
        // register type.
        MemVT = In.VT;
      } else {
        MemVT = In.ArgVT;
      }
    } else if (In.ArgVT.isVector() && In.VT.isVector() &&
               In.ArgVT.getScalarType() == In.VT.getScalarType()) {
      assert(In.ArgVT.getVectorNumElements() > In.VT.getVectorNumElements());
      // We have a vector value which has been split into a vector with
      // the same scalar type, but fewer elements. This should handle
      // all the floating-point vector types.
      MemVT = In.VT;
    } else if (In.ArgVT.isVector() &&
               In.ArgVT.getVectorNumElements() == NumRegs) {
      // This arg has been split so that each element is stored in a separate
      // register.
      MemVT = In.ArgVT.getScalarType();
    } else if (In.ArgVT.isExtended()) {
      // We have an extended type, like i65.
      MemVT = In.VT;
    } else {
      unsigned MemoryBits = In.ArgVT.getStoreSizeInBits() / NumRegs;
      assert(In.ArgVT.getStoreSizeInBits() % NumRegs == 0);
      if (In.VT.isInteger()) {
        MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
      } else if (In.VT.isVector()) {
        assert(!In.VT.getScalarType().isFloatingPoint());
        unsigned NumElements = In.VT.getVectorNumElements();
        assert(MemoryBits % NumElements == 0);
        // This vector type has been split into another vector type with
        // a different element size.
        EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                         MemoryBits / NumElements);
        MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
      } else {
        llvm_unreachable("cannot deduce memory type.");
      }
    }

    // Convert one element vectors to scalar.
    if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
      MemVT = MemVT.getScalarType();

    if (MemVT.isExtended()) {
      // This should really only happen if we have vec3 arguments.
      assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
      MemVT = MemVT.getPow2VectorType(State.getContext());
    }

    assert(MemVT.isSimple());
    allocateKernArg(i, In.VT, MemVT.getSimpleVT(), CCValAssign::Full, In.Flags,
                    State);
  }
}
SDValue AMDGPUTargetLowering::LowerReturn(
  SDValue Chain, CallingConv::ID CallConv,
  bool isVarArg,
  const SmallVectorImpl<ISD::OutputArg> &Outs,
  const SmallVectorImpl<SDValue> &OutVals,
  const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  // "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}
SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, "unsupported call to function " + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}
SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = *DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}
void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}
static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUASI.LOCAL_ADDRESS) {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (!hasDefinedInitializer(GV)) {
      unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
      return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
    }
  }

  const Function &Fn = *DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return DAG.getUNDEF(Op.getValueType());
}
SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}
/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorLoad(Load, DAG);

  SDValue BasePtr = Load->getBasePtr();
  EVT PtrVT = BasePtr.getValueType();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                  Load->getChain(), BasePtr, SrcValue, LoMemVT,
                                  BaseAlign, Load->getMemOperand()->getFlags());
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(Size, SL, PtrVT));
  SDValue HiLoad =
      DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
                     HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, HiAlign, Load->getMemOperand()->getFlags());

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorStore(Store, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  EVT PtrVT = BasePtr.getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
                                              PtrVT));

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore =
      DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
                        Store->getMemOperand()->getFlags());
  SDValue HiStore =
      DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
                        HiMemVT, HiAlign, Store->getMemOperand()->getFlags());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}
// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit signed integer.
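//
// Worked example (illustrative): for 100 udiv 7, fa = 100.0f and fb = 7.0f,
// so fq = trunc(100.0f * rcp(7.0f)) is 14.0f (or 13.0f if rcp rounds low).
// fr = mad(-fq, fb, fa) then measures the shortfall, and the "cv ? jq : 0"
// correction bumps the quotient back up when fr >= fb, so either way the
// result is Div = 14 and Rem = 100 - 14 * 7 = 2.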
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
                                            bool Sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
  if (LHSSignBits < 9)
    return SDValue();

  unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
  if (RHSSignBits < 9)
    return SDValue();

  unsigned BitSize = VT.getSizeInBits();
  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = BitSize - SignBits;
  if (DivBits > 24)
    return SDValue();

  ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (Sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
  }

  // int ia = (int)LHS;
  SDValue ia = LHS;

  // int ib = (int)RHS;
  SDValue ib = RHS;

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  unsigned OpCode = Subtarget->hasFP32Denormals() ?
                    (unsigned)AMDGPUISD::FMAD_FTZ :
                    (unsigned)ISD::FMAD;
  SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));

  // dst = iq + jq;
  SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);

  // Rem needs compensation, it's easier to recompute it.
  SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
  Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);

  // Truncate to number of bits this divide really is.
  if (Sign) {
    SDValue InRegSize
      = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
    Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
    Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
  } else {
    SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
    Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
    Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
  }

  return DAG.getMergeValues({ Div, Rem }, DL);
}
void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &Results) const {
  assert(Op.getValueType() == MVT::i64);

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

  SDValue one = DAG.getConstant(1, DL, HalfVT);
  SDValue zero = DAG.getConstant(0, DL, HalfVT);

  // Hi/Lo split
  SDValue LHS = Op.getOperand(0);
  SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
  SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);

  SDValue RHS = Op.getOperand(1);
  SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
  SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);

  if (VT == MVT::i64 &&
      DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
      DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {

    SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
                              LHS_Lo, RHS_Lo);

    SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), zero});
    SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), zero});

    Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
    Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
    return;
  }

  // Get Speculative values
  SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
  SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);

  SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
  SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, zero});
  REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);

  SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
  SDValue DIV_Lo = zero;

  const unsigned halfBitWidth = HalfVT.getSizeInBits();
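
  // The loop below is classic restoring long division over the remaining low
  // bits, one bit per iteration: shift the partial remainder left, bring in
  // the next bit of LHS_Lo, and when the partial remainder reaches RHS,
  // subtract RHS and set that quotient bit. E.g. dividing 13 (0b1101) by 3
  // produces quotient bits 0,1,0,0 from high to low: 13 = 4 * 3 + 1.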
  for (unsigned i = 0; i < halfBitWidth; ++i) {
    const unsigned bitPos = halfBitWidth - i - 1;
    SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
    // Get value of high bit
    SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
    HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
    HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);

    // Shift
    REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
    // Add LHS high bit
    REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);

    SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
    SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);

    DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);

    // Update REM
    SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
    REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
  }

  SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
  DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
  Results.push_back(DIV);
  Results.push_back(REM);
}
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (VT == MVT::i64) {
    SmallVector<SDValue, 2> Results;
    LowerUDIVREM64(Op, DAG, Results);
    return DAG.getMergeValues(Results, DL);
  }

  if (VT == MVT::i32) {
    if (SDValue Res = LowerDIVREM24(Op, DAG, false))
      return Res;
  }

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
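  //
  // The refinement below appears to follow the usual reciprocal-estimate
  // scheme: E measures the error of RCP, RCP +/- E brackets the true
  // 2^32 / Den, and the chosen Tmp0 makes mulhu(Tmp0, Num) land within one of
  // the exact quotient, so the final selects only ever adjust Quotient by 1.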
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = mul(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, DL, VT),
                                             DAG.getConstant(0, DL, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, DL, VT),
                                              DAG.getConstant(0, DL, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}
1647 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
1648 SelectionDAG &DAG) const {
1649 SDLoc DL(Op);
1650 EVT VT = Op.getValueType();
1652 SDValue LHS = Op.getOperand(0);
1653 SDValue RHS = Op.getOperand(1);
1655 SDValue Zero = DAG.getConstant(0, DL, VT);
1656 SDValue NegOne = DAG.getConstant(-1, DL, VT);
1658 if (VT == MVT::i32) {
1659 if (SDValue Res = LowerDIVREM24(Op, DAG, true))
1660 return Res;
1661 }
1663 if (VT == MVT::i64 &&
1664 DAG.ComputeNumSignBits(LHS) > 32 &&
1665 DAG.ComputeNumSignBits(RHS) > 32) {
1666 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1669 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1670 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1671 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1672 LHS_Lo, RHS_Lo);
1673 SDValue Res[2] = {
1674 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
1675 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
1676 };
1677 return DAG.getMergeValues(Res, DL);
1678 }
1680 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
1681 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
1682 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
1683 SDValue RSign = LHSign; // Remainder sign is the same as LHS
1685 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
1686 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
1688 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
1689 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
1691 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
1692 SDValue Rem = Div.getValue(1);
1694 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
1695 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
1697 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
1698 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
1700 SDValue Res[2] = {
1701 Div,
1702 Rem
1703 };
1704 return DAG.getMergeValues(Res, DL);
1705 }
1707 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
1708 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
1709 SDLoc SL(Op);
1710 EVT VT = Op.getValueType();
1711 SDValue X = Op.getOperand(0);
1712 SDValue Y = Op.getOperand(1);
1714 // TODO: Should this propagate fast-math-flags?
1716 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
1717 SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
1718 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
1720 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
1721 }
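// Reference model of the expansion above (illustration only; 'frem_ref' is a
// hypothetical name):
//   float frem_ref(float x, float y) {
//     return x - truncf(x / y) * y;
//   }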
1723 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
1724 SDLoc SL(Op);
1725 SDValue Src = Op.getOperand(0);
1727 // result = trunc(src)
1728 // if (src > 0.0 && src != result)
1729 //   result += 1.0
1731 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1733 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
1734 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
1736 EVT SetCCVT =
1737 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
1739 SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
1740 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
1741 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc);
1743 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
1744 // TODO: Should this propagate fast-math-flags?
1745 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
1746 }
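// Branchless reference model of the sequence above (illustration only):
//   double t   = trunc(src);
//   double add = (src > 0.0 && src != t) ? 1.0 : 0.0; // ordered compares
//   return t + add;                                   // NaN propagates via t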
1748 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
1749 SelectionDAG &DAG) {
1750 const unsigned FractBits = 52;
1751 const unsigned ExpBits = 11;
1753 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
1754 Hi,
1755 DAG.getConstant(FractBits - 32, SL, MVT::i32),
1756 DAG.getConstant(ExpBits, SL, MVT::i32));
1757 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
1758 DAG.getConstant(1023, SL, MVT::i32));
1760 return Exp;
1761 }
1763 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
1764 SDLoc SL(Op);
1765 SDValue Src = Op.getOperand(0);
1767 assert(Op.getValueType() == MVT::f64);
1769 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1770 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1772 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
1774 // Extract the upper half, since this is where we will find the sign and
1775 // exponent.
1776 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
1778 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
1780 const unsigned FractBits = 52;
1782 // Extract the sign bit.
1783 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
1784 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
1786 // Extend back to 64 bits.
1787 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
1788 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
1790 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
1791 const SDValue FractMask
1792 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
1794 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
1795 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
1796 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
1798 EVT SetCCVT =
1799 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
1801 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
1803 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
1804 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
1806 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
1807 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
1809 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
1810 }
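// Reference model of the bit manipulation above (illustration only):
//   uint64_t bits = bit_cast<uint64_t>(src);
//   int exp = (int)((bits >> 52) & 0x7ff) - 1023;   // unbiased exponent
//   uint64_t fract = (1ULL << 52) - 1;
//   uint64_t r = bits & ~(fract >> exp);            // clear sub-integer bits
//   if (exp < 0)  r = bits & (1ULL << 63);          // |src| < 1 -> +/-0.0
//   if (exp > 51) r = bits;                         // already integral
//   return bit_cast<double>(r);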
1812 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
1813 SDLoc SL(Op);
1814 SDValue Src = Op.getOperand(0);
1816 assert(Op.getValueType() == MVT::f64);
1818 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
1819 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
1820 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
1822 // TODO: Should this propagate fast-math-flags?
1824 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
1825 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
1827 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
1829 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
1830 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
1832 EVT SetCCVT =
1833 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
1834 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
1836 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
1837 }
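// This is the classic 0x1.0p+52 trick: because f64 has 52 fraction bits,
// adding and then subtracting 2^52 (carrying src's sign) rounds to integer in
// the default nearest-even mode. Inputs with |src| > 0x1.fffffffffffffp+51 are
// already integral and bypass the add/sub. Reference (illustration only):
//   double c = copysign(0x1.0p+52, src);
//   double r = (src + c) - c;
//   return fabs(src) > 0x1.fffffffffffffp+51 ? src : r;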
1839 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
1840 // FNEARBYINT and FRINT are the same, except in their handling of FP
1841 // exceptions. Those aren't really meaningful for us, and OpenCL only has
1842 // rint, so just treat them as equivalent.
1843 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
1846 // XXX - May require not supporting f32 denormals?
1848 // Don't handle v2f16. The extra instructions to scalarize and repack around the
1849 // compare and vselect end up producing worse code than scalarizing the whole
1850 // operation.
1851 SDValue AMDGPUTargetLowering::LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const {
1852 SDLoc SL(Op);
1853 SDValue X = Op.getOperand(0);
1854 EVT VT = Op.getValueType();
1856 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
1858 // TODO: Should this propagate fast-math-flags?
1860 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
1862 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
1864 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
1865 const SDValue One = DAG.getConstantFP(1.0, SL, VT);
1866 const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
1868 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
1870 EVT SetCCVT =
1871 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1873 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
1875 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
1877 return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
1878 }
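// Reference model (illustration only): round-half-away-from-zero built on
// trunc, with the +/-1.0 step selected branchlessly through copysign:
//   float t = truncf(x);
//   float step = (fabsf(x - t) >= 0.5f) ? copysignf(1.0f, x) : 0.0f;
//   return t + step;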
1880 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
1881 SDLoc SL(Op);
1882 SDValue X = Op.getOperand(0);
1884 SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
1886 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1887 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1888 const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
1889 const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
1890 EVT SetCCVT =
1891 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
1893 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
1895 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
1897 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
1899 const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
1900 MVT::i64);
1902 SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
1903 SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
1904 DAG.getConstant(INT64_C(0x0008000000000000), SL,
1905 MVT::i64),
1906 Exp);
1908 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
1909 SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
1910 DAG.getConstant(0, SL, MVT::i64), Tmp0,
1911 ISD::SETNE);
1913 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
1914 D, DAG.getConstant(0, SL, MVT::i64));
1915 SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
1917 K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
1918 K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
1920 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
1921 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
1922 SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
1924 SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
1925 ExpEqNegOne,
1926 DAG.getConstantFP(1.0, SL, MVT::f64),
1927 DAG.getConstantFP(0.0, SL, MVT::f64));
1929 SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
1931 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
1932 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
1934 return K;
1935 }
1937 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
1938 EVT VT = Op.getValueType();
1940 if (VT == MVT::f32 || VT == MVT::f16)
1941 return LowerFROUND32_16(Op, DAG);
1943 if (VT == MVT::f64)
1944 return LowerFROUND64(Op, DAG);
1946 llvm_unreachable("unhandled type");
1947 }
1949 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
1950 SDLoc SL(Op);
1951 SDValue Src = Op.getOperand(0);
1953 // result = trunc(src);
1954 // if (src < 0.0 && src != result)
1955 //   result += -1.0
1957 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1959 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
1960 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
1962 EVT SetCCVT =
1963 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
1965 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
1966 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
1967 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
1969 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
1970 // TODO: Should this propagate fast-math-flags?
1971 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
1974 SDValue AMDGPUTargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
1975 SDLoc SL(Op);
1976 SDValue Src = Op.getOperand(0);
1977 bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
1979 if (ZeroUndef && Src.getValueType() == MVT::i32)
1980 return DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Src);
1982 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
1984 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1985 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1987 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1988 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1990 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
1991 *DAG.getContext(), MVT::i32);
1993 SDValue Hi0 = DAG.getSetCC(SL, SetCCVT, Hi, Zero, ISD::SETEQ);
1995 SDValue CtlzLo = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Lo);
1996 SDValue CtlzHi = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Hi);
1998 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
1999 SDValue Add = DAG.getNode(ISD::ADD, SL, MVT::i32, CtlzLo, Bits32);
2001 // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
2002 SDValue NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0, Add, CtlzHi);
2004 if (!ZeroUndef) {
2005 // Test if the full 64-bit input is zero.
2007 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
2008 // which we probably don't want.
2009 SDValue Lo0 = DAG.getSetCC(SL, SetCCVT, Lo, Zero, ISD::SETEQ);
2010 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0, Hi0);
2012 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
2013 // with the same cycles, otherwise it is slower.
2014 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
2015 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
2017 const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);
2019 // The instruction returns -1 for 0 input, but the defined intrinsic
2020 // behavior is to return the number of bits.
2021 NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32,
2022 SrcIsZero, Bits64, NewCtlz);
2023 }
2025 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewCtlz);
2026 }
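// Reference model of the 64-bit expansion above (illustration only; ctlz32 is
// a hypothetical 32-bit count-leading-zeros helper):
//   uint32_t lo = (uint32_t)src, hi = (uint32_t)(src >> 32);
//   uint32_t n = (hi == 0) ? ctlz32(lo) + 32 : ctlz32(hi);
//   if (!zero_undef && (lo | hi) == 0)
//     n = 64; // hardware returns -1 here; the ISD node is defined to give 64
//   return (uint64_t)n;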
2028 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2029 bool Signed) const {
2030 // Unsigned
2031 // cul2f(ulong u)
2032 //{
2033 // uint lz = clz(u);
2034 // uint e = (u != 0) ? 127U + 63U - lz : 0;
2035 // u = (u << lz) & 0x7fffffffffffffffUL;
2036 // ulong t = u & 0xffffffffffUL;
2037 // uint v = (e << 23) | (uint)(u >> 40);
2038 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
2039 // return as_float(v + r);
2040 //}
2041 // Signed
2042 // cl2f(long l)
2043 //{
2044 // long s = l >> 63;
2045 // float r = cul2f((l + s) ^ s);
2046 // return s ? -r : r;
2047 //}
2049 SDLoc SL(Op);
2050 SDValue Src = Op.getOperand(0);
2051 SDValue L = Src;
2053 SDValue S;
2054 if (Signed) {
2055 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
2056 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
2058 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
2059 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
2060 }
2062 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
2063 *DAG.getContext(), MVT::f32);
2066 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
2067 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
2068 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
2069 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
2071 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
2072 SDValue E = DAG.getSelect(SL, MVT::i32,
2073 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
2074 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
2075 ZeroI32);
2077 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
2078 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
2079 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
2081 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
2082 DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
2084 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
2085 U, DAG.getConstant(40, SL, MVT::i64));
2087 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
2088 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
2089 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl));
2091 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
2092 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
2093 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
2095 SDValue One = DAG.getConstant(1, SL, MVT::i32);
2097 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
2099 SDValue R = DAG.getSelect(SL, MVT::i32,
2100 RCmp,
2101 One,
2102 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
2103 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
2104 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
2106 if (!Signed)
2107 return R;
2109 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
2110 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
2111 }
2113 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2114 bool Signed) const {
2115 SDLoc SL(Op);
2116 SDValue Src = Op.getOperand(0);
2118 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
2120 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2121 DAG.getConstant(0, SL, MVT::i32));
2122 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
2123 DAG.getConstant(1, SL, MVT::i32));
2125 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2126 SL, MVT::f64, Hi);
2128 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2130 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
2131 DAG.getConstant(32, SL, MVT::i32));
2132 // TODO: Should this propagate fast-math-flags?
2133 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2136 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2137 SelectionDAG &DAG) const {
2138 assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2139 "operation should be legal");
2141 // TODO: Factor out code common with LowerSINT_TO_FP.
2143 EVT DestVT = Op.getValueType();
2144 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2145 SDLoc DL(Op);
2146 SDValue Src = Op.getOperand(0);
2148 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2149 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2150 SDValue FPRound =
2151 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2153 return FPRound;
2154 }
2156 if (DestVT == MVT::f32)
2157 return LowerINT_TO_FP32(Op, DAG, false);
2159 assert(DestVT == MVT::f64);
2160 return LowerINT_TO_FP64(Op, DAG, false);
2163 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2164 SelectionDAG &DAG) const {
2165 assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2166 "operation should be legal");
2168 // TODO: Factor out code common with LowerUINT_TO_FP.
2170 EVT DestVT = Op.getValueType();
2171 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2172 SDLoc DL(Op);
2173 SDValue Src = Op.getOperand(0);
2175 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2176 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op));
2177 SDValue FPRound =
2178 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2180 return FPRound;
2181 }
2183 if (DestVT == MVT::f32)
2184 return LowerINT_TO_FP32(Op, DAG, true);
2186 assert(DestVT == MVT::f64);
2187 return LowerINT_TO_FP64(Op, DAG, true);
2190 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
2191 bool Signed) const {
2193 SDLoc SL(Op);
2194 SDValue Src = Op.getOperand(0);
2196 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2198 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
2199 MVT::f64);
2200 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
2201 MVT::f64);
2202 // TODO: Should this propagate fast-math-flags?
2203 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
2205 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
2208 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
2210 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
2211 MVT::i32, FloorMul);
2212 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2214 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
2216 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
2217 }
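// K0 is 0x1.0p-32 (2^-32) and K1 is -0x1.0p+32 (-2^32), so the sequence splits
// trunc(src) exactly into 32-bit halves. Reference model (illustration only;
// cvt/cvt_u stand for the signed/unsigned FP_TO_*INT conversions):
//   double t  = trunc(src);
//   double hi = floor(t * 0x1.0p-32);     // t / 2^32, rounded down
//   double lo = fma(hi, -0x1.0p+32, t);   // t - hi * 2^32, exact via fma
//   return ((uint64_t)(uint32_t)cvt(hi) << 32) | (uint32_t)cvt_u(lo);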
2219 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2220 SDLoc DL(Op);
2221 SDValue N0 = Op.getOperand(0);
2223 // Convert to target node to get known bits
2224 if (N0.getValueType() == MVT::f32)
2225 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2227 if (getTargetMachine().Options.UnsafeFPMath) {
2228 // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2229 return SDValue();
2230 }
2232 assert(N0.getSimpleValueType() == MVT::f64);
2234 // f64 -> f16 conversion using round-to-nearest-even rounding mode.
2235 const unsigned ExpMask = 0x7ff;
2236 const unsigned ExpBiasf64 = 1023;
2237 const unsigned ExpBiasf16 = 15;
2238 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2239 SDValue One = DAG.getConstant(1, DL, MVT::i32);
2240 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2241 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2242 DAG.getConstant(32, DL, MVT::i64));
2243 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2244 U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2245 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2246 DAG.getConstant(20, DL, MVT::i64));
2247 E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2248 DAG.getConstant(ExpMask, DL, MVT::i32));
2249 // Subtract the fp64 exponent bias (1023) to get the real exponent and
2250 // add the f16 bias (15) to get the biased exponent for the f16 format.
2251 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2252 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2254 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2255 DAG.getConstant(8, DL, MVT::i32));
2256 M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2257 DAG.getConstant(0xffe, DL, MVT::i32));
2259 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2260 DAG.getConstant(0x1ff, DL, MVT::i32));
2261 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2263 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2264 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2266 // (M != 0 ? 0x0200 : 0) | 0x7c00;
2267 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2268 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2269 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2271 // N = M | (E << 12);
2272 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2273 DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2274 DAG.getConstant(12, DL, MVT::i32)));
2276 // B = clamp(1-E, 0, 13);
2277 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2278 One, E);
2279 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2280 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2281 DAG.getConstant(13, DL, MVT::i32));
2283 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2284 DAG.getConstant(0x1000, DL, MVT::i32));
2286 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2287 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2288 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2289 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2291 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2292 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2293 DAG.getConstant(0x7, DL, MVT::i32));
2294 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2295 DAG.getConstant(2, DL, MVT::i32));
2296 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2297 One, Zero, ISD::SETEQ);
2298 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2299 One, Zero, ISD::SETGT);
2300 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2301 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2303 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2304 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2305 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2306 I, V, ISD::SETEQ);
2308 // Extract the sign bit.
2309 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2310 DAG.getConstant(16, DL, MVT::i32));
2311 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2312 DAG.getConstant(0x8000, DL, MVT::i32));
2314 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2315 return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2316 }
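// Worked example of the exponent rebias above: -ExpBiasf64 + ExpBiasf16 =
// -1008 maps biased f64 exponents onto biased f16 exponents; e.g. 1.0 has
// E = 1023 in f64 and 1023 - 1008 = 15 in f16. After the rebias, E > 30
// overflows to infinity (0x7c00), E < 1 takes the denormal path computed
// through B and D, and E == 1039 (an f64 exponent of 2047, i.e. Inf/NaN)
// selects I.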
2318 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
2319 SelectionDAG &DAG) const {
2320 SDValue Src = Op.getOperand(0);
2322 // TODO: Factor out code common with LowerFP_TO_UINT.
2324 EVT SrcVT = Src.getValueType();
2325 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2326 SDLoc DL(Op);
2328 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2329 SDValue FpToInt32 =
2330 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2332 return FpToInt32;
2333 }
2335 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2336 return LowerFP64_TO_INT(Op, DAG, true);
2338 return SDValue();
2339 }
2341 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
2342 SelectionDAG &DAG) const {
2343 SDValue Src = Op.getOperand(0);
2345 // TODO: Factor out code common with LowerFP_TO_SINT.
2347 EVT SrcVT = Src.getValueType();
2348 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
2349 SDLoc DL(Op);
2351 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
2352 SDValue FpToInt32 =
2353 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);
2355 return FpToInt32;
2356 }
2358 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2359 return LowerFP64_TO_INT(Op, DAG, false);
2361 return SDValue();
2362 }
2364 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2365 SelectionDAG &DAG) const {
2366 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2367 MVT VT = Op.getSimpleValueType();
2368 MVT ScalarVT = VT.getScalarType();
2370 assert(VT.isVector());
2372 SDValue Src = Op.getOperand(0);
2373 SDLoc DL(Op);
2375 // TODO: Don't scalarize on Evergreen?
2376 unsigned NElts = VT.getVectorNumElements();
2377 SmallVector<SDValue, 8> Args;
2378 DAG.ExtractVectorElements(Src, Args, 0, NElts);
2380 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2381 for (unsigned I = 0; I < NElts; ++I)
2382 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2384 return DAG.getBuildVector(VT, DL, Args);
2387 //===----------------------------------------------------------------------===//
2388 // Custom DAG optimizations
2389 //===----------------------------------------------------------------------===//
2391 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2392 KnownBits Known;
2393 EVT VT = Op.getValueType();
2394 DAG.computeKnownBits(Op, Known);
2396 return (VT.getSizeInBits() - Known.countMinLeadingZeros()) <= 24;
2397 }
2399 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2400 EVT VT = Op.getValueType();
2402 // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
2404 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2405 // as unsigned 24-bit values.
2406 (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
2407 }
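// Example: any i32 value sign-extended from i24 has at least 9 sign bits, so
// 32 - ComputeNumSignBits(Op) <= 23 < 24 and it qualifies; 0x00800000 (bit 23
// set but not sign-extended) has only 8 sign bits and is rejected.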
2409 static bool simplifyI24(SDNode *Node24, unsigned OpIdx,
2410 TargetLowering::DAGCombinerInfo &DCI) {
2412 SelectionDAG &DAG = DCI.DAG;
2413 SDValue Op = Node24->getOperand(OpIdx);
2414 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2415 EVT VT = Op.getValueType();
2417 APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
2418 APInt KnownZero, KnownOne;
2419 TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
2420 if (TLI.SimplifyDemandedBits(Node24, OpIdx, Demanded, DCI, TLO))
2421 return true;
2423 return false;
2424 }
2426 template <typename IntTy>
2427 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2428 uint32_t Width, const SDLoc &DL) {
2429 if (Width + Offset < 32) {
2430 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2431 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2432 return DAG.getConstant(Result, DL, MVT::i32);
2433 }
2435 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2438 static bool hasVolatileUser(SDNode *Val) {
2439 for (SDNode *U : Val->uses()) {
2440 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2441 if (M->isVolatile())
2442 return true;
2443 }
2444 }
2446 return false;
2447 }
2449 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
2450 // i32 vectors are the canonical memory type.
2451 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2452 return false;
2454 if (!VT.isByteSized())
2455 return false;
2457 unsigned Size = VT.getStoreSize();
2459 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2460 return false;
2462 if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2463 return false;
2465 return true;
2466 }
2468 // Replace load of an illegal type with a load of a bitcast to a friendlier
2469 // type.
2470 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2471 DAGCombinerInfo &DCI) const {
2472 if (!DCI.isBeforeLegalize())
2473 return SDValue();
2475 LoadSDNode *LN = cast<LoadSDNode>(N);
2476 if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2477 return SDValue();
2480 SelectionDAG &DAG = DCI.DAG;
2481 EVT VT = LN->getMemoryVT();
2483 unsigned Size = VT.getStoreSize();
2484 unsigned Align = LN->getAlignment();
2485 if (Align < Size && isTypeLegal(VT)) {
2486 bool IsFast;
2487 unsigned AS = LN->getAddressSpace();
2489 // Expand unaligned loads earlier than legalization. Due to visitation order
2490 // problems during legalization, the emitted instructions to pack and unpack
2491 // the bytes again are not eliminated in the case of an unaligned copy.
2492 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
2493 if (VT.isVector())
2494 return scalarizeVectorLoad(LN, DAG);
2496 SDValue Ops[2];
2497 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2498 return DAG.getMergeValues(Ops, SDLoc(N));
2499 }
2501 if (!IsFast)
2502 return SDValue();
2503 }
2505 if (!shouldCombineMemoryType(VT))
2506 return SDValue();
2508 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2509 SDLoc SL(N);
2510 SDValue NewLoad
2511 = DAG.getLoad(NewVT, SL, LN->getChain(),
2512 LN->getBasePtr(), LN->getMemOperand());
2514 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2515 DCI.CombineTo(N, BC, NewLoad.getValue(1));
2516 return SDValue(N, 0);
2519 // Replace store of an illegal type with a store of a bitcast to a friendlier
2520 // type.
2521 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2522 DAGCombinerInfo &DCI) const {
2523 if (!DCI.isBeforeLegalize())
2524 return SDValue();
2526 StoreSDNode *SN = cast<StoreSDNode>(N);
2527 if (SN->isVolatile() || !ISD::isNormalStore(SN))
2528 return SDValue();
2530 EVT VT = SN->getMemoryVT();
2531 unsigned Size = VT.getStoreSize();
2533 SDLoc SL(N);
2534 SelectionDAG &DAG = DCI.DAG;
2535 unsigned Align = SN->getAlignment();
2536 if (Align < Size && isTypeLegal(VT)) {
2537 bool IsFast;
2538 unsigned AS = SN->getAddressSpace();
2540 // Expand unaligned stores earlier than legalization. Due to visitation
2541 // order problems during legalization, the emitted instructions to pack and
2542 // unpack the bytes again are not eliminated in the case of an unaligned
2543 // copy.
2544 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
2545 if (VT.isVector())
2546 return scalarizeVectorStore(SN, DAG);
2548 return expandUnalignedStore(SN, DAG);
2549 }
2551 if (!IsFast)
2552 return SDValue();
2553 }
2555 if (!shouldCombineMemoryType(VT))
2556 return SDValue();
2558 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2559 SDValue Val = SN->getValue();
2561 //DCI.AddToWorklist(Val.getNode());
2563 bool OtherUses = !Val.hasOneUse();
2564 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2565 if (OtherUses) {
2566 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2567 DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2568 }
2570 return DAG.getStore(SN->getChain(), SL, CastVal,
2571 SN->getBasePtr(), SN->getMemOperand());
2574 SDValue AMDGPUTargetLowering::performClampCombine(SDNode *N,
2575 DAGCombinerInfo &DCI) const {
2576 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
2577 if (!CSrc)
2578 return SDValue();
2580 const APFloat &F = CSrc->getValueAPF();
2581 APFloat Zero = APFloat::getZero(F.getSemantics());
2582 APFloat::cmpResult Cmp0 = F.compare(Zero);
2583 if (Cmp0 == APFloat::cmpLessThan ||
2584 (Cmp0 == APFloat::cmpUnordered && Subtarget->enableDX10Clamp())) {
2585 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0));
2586 }
2588 APFloat One(F.getSemantics(), "1.0");
2589 APFloat::cmpResult Cmp1 = F.compare(One);
2590 if (Cmp1 == APFloat::cmpGreaterThan)
2591 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0));
2593 return SDValue(CSrc, 0);
2596 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
2597 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
2598 // patterns.
2599 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
2600 DAGCombinerInfo &DCI) const {
2601 SelectionDAG &DAG = DCI.DAG;
2602 SDValue N0 = N->getOperand(0);
2604 // (vt2 (assertzext (truncate vt0:x), vt1)) ->
2605 // (vt2 (truncate (assertzext vt0:x, vt1)))
2606 if (N0.getOpcode() == ISD::TRUNCATE) {
2607 SDValue N1 = N->getOperand(1);
2608 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
2609 SDLoc SL(N);
2611 SDValue Src = N0.getOperand(0);
2612 EVT SrcVT = Src.getValueType();
2613 if (SrcVT.bitsGE(ExtVT)) {
2614 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
2615 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
2616 }
2617 }
2619 return SDValue();
2620 }
2621 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
2622 /// binary operation \p Opc to it with the corresponding constant operands.
2623 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
2624 DAGCombinerInfo &DCI, const SDLoc &SL,
2625 unsigned Opc, SDValue LHS,
2626 uint32_t ValLo, uint32_t ValHi) const {
2627 SelectionDAG &DAG = DCI.DAG;
2628 SDValue Lo, Hi;
2629 std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
2631 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
2632 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
2634 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
2635 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
2637 // Re-visit the ands. It's possible we eliminated one of them and it could
2638 // simplify the vector.
2639 DCI.AddToWorklist(Lo.getNode());
2640 DCI.AddToWorklist(Hi.getNode());
2642 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
2643 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
2646 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
2647 DAGCombinerInfo &DCI) const {
2648 EVT VT = N->getValueType(0);
2650 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2651 if (!RHS)
2652 return SDValue();
2654 SDValue LHS = N->getOperand(0);
2655 unsigned RHSVal = RHS->getZExtValue();
2656 if (!RHSVal)
2657 return LHS;
2659 SDLoc SL(N);
2660 SelectionDAG &DAG = DCI.DAG;
2662 switch (LHS->getOpcode()) {
2663 default:
2664 break;
2665 case ISD::ZERO_EXTEND:
2666 case ISD::SIGN_EXTEND:
2667 case ISD::ANY_EXTEND: {
2668 // shl (ext x) => zext (shl x), if shift does not overflow int
2669 if (VT != MVT::i64)
2670 break;
2671 KnownBits Known;
2672 SDValue X = LHS->getOperand(0);
2673 DAG.computeKnownBits(X, Known);
2674 unsigned LZ = Known.countMinLeadingZeros();
2675 if (LZ < RHSVal)
2676 break;
2677 EVT XVT = X.getValueType();
2678 SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
2679 return DAG.getZExtOrTrunc(Shl, SL, VT);
2680 }
2681 case ISD::OR:
2682 if (!isOrEquivalentToAdd(DAG, LHS))
2683 break;
2684 LLVM_FALLTHROUGH;
2685 case ISD::ADD: {
2686 // shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1)
2687 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
2688 SDValue Shl = DAG.getNode(ISD::SHL, SL, VT, LHS->getOperand(0),
2689 SDValue(RHS, 0));
2690 SDValue C2V = DAG.getConstant(C2->getAPIntValue() << RHSVal,
2691 SDLoc(C2), VT);
2692 return DAG.getNode(LHS->getOpcode(), SL, VT, Shl, C2V);
2693 }
2694 break;
2695 }
2696 }
2698 if (VT != MVT::i64)
2699 return SDValue();
2701 // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))
2703 // On some subtargets, 64-bit shift is a quarter rate instruction. In the
2704 // common case, splitting this into a move and a 32-bit shift is faster and
2705 // the same code size.
2706 if (RHSVal < 32)
2707 return SDValue();
2709 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
2711 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
2712 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
2714 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2716 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
2717 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
2718 }
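// Reference model of the split above for C >= 32 (illustration only;
// 'shl64' is a hypothetical name):
//   uint64_t shl64(uint64_t x, unsigned C) {
//     uint32_t hi = (uint32_t)x << (C - 32);  // single 32-bit shift
//     return (uint64_t)hi << 32;              // build_pair 0, hi
//   }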
2720 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
2721 DAGCombinerInfo &DCI) const {
2722 if (N->getValueType(0) != MVT::i64)
2723 return SDValue();
2725 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2726 if (!RHS)
2727 return SDValue();
2729 SelectionDAG &DAG = DCI.DAG;
2730 SDLoc SL(N);
2731 unsigned RHSVal = RHS->getZExtValue();
2733 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
2734 if (RHSVal == 32) {
2735 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
2736 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
2737 DAG.getConstant(31, SL, MVT::i32));
2739 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
2740 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
2741 }
2743 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
2744 if (RHSVal == 63) {
2745 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
2746 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
2747 DAG.getConstant(31, SL, MVT::i32));
2748 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
2749 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
2750 }
2752 return SDValue();
2753 }
2755 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
2756 DAGCombinerInfo &DCI) const {
2757 if (N->getValueType(0) != MVT::i64)
2758 return SDValue();
2760 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2761 if (!RHS)
2762 return SDValue();
2764 unsigned ShiftAmt = RHS->getZExtValue();
2765 if (ShiftAmt < 32)
2766 return SDValue();
2768 // srl i64:x, C for C >= 32
2769 //   =>
2770 //   build_pair (srl hi_32(x), C - 32), 0
2772 SelectionDAG &DAG = DCI.DAG;
2773 SDLoc SL(N);
2775 SDValue One = DAG.getConstant(1, SL, MVT::i32);
2776 SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2778 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0));
2779 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32,
2780 VecOp, One);
2782 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
2783 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
2785 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
2787 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
2790 // We need to specifically handle i64 mul here to avoid unnecessary conversion
2791 // instructions. If we only match on the legalized i64 mul expansion,
2792 // SimplifyDemandedBits will be unable to remove them because there will be
2793 // multiple uses due to the separate mul + mulh[su].
2794 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
2795 SDValue N0, SDValue N1, unsigned Size, bool Signed) {
2796 if (Size <= 32) {
2797 unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
2798 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
2799 }
2801 // Because we want to eliminate extension instructions before the
2802 // operation, we need to create a single user here (i.e. not the separate
2803 // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
2805 unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
2807 SDValue Mul = DAG.getNode(MulOpc, SL,
2808 DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
2810 return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
2811 Mul.getValue(0), Mul.getValue(1));
2814 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
2815 DAGCombinerInfo &DCI) const {
2816 EVT VT = N->getValueType(0);
2818 unsigned Size = VT.getSizeInBits();
2819 if (VT.isVector() || Size > 64)
2820 return SDValue();
2822 // There are i16 integer mul/mad.
2823 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
2824 return SDValue();
2826 SelectionDAG &DAG = DCI.DAG;
2827 SDLoc DL(N);
2829 SDValue N0 = N->getOperand(0);
2830 SDValue N1 = N->getOperand(1);
2831 SDValue Mul;
2833 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
2834 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
2835 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
2836 Mul = getMul24(DAG, DL, N0, N1, Size, false);
2837 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
2838 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
2839 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
2840 Mul = getMul24(DAG, DL, N0, N1, Size, true);
2841 } else {
2842 return SDValue();
2843 }
2845 // We need to use sext even for MUL_U24, because MUL_U24 is used
2846 // for signed multiply of 8 and 16-bit types.
2847 return DAG.getSExtOrTrunc(Mul, DL, VT);
2848 }
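// Worked i32 example (illustration only): with a = 0xABCDEF and b = 2, both
// provably u24, MUL_U24 yields the exact 32-bit product 0x1579BDE, and
// getSExtOrTrunc returns it unchanged since the product's sign bit is clear.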
2850 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
2851 DAGCombinerInfo &DCI) const {
2852 EVT VT = N->getValueType(0);
2854 if (!Subtarget->hasMulI24() || VT.isVector())
2855 return SDValue();
2857 SelectionDAG &DAG = DCI.DAG;
2858 SDLoc DL(N);
2860 SDValue N0 = N->getOperand(0);
2861 SDValue N1 = N->getOperand(1);
2863 if (!isI24(N0, DAG) || !isI24(N1, DAG))
2864 return SDValue();
2866 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
2867 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
2869 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
2870 DCI.AddToWorklist(Mulhi.getNode());
2871 return DAG.getSExtOrTrunc(Mulhi, DL, VT);
2874 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
2875 DAGCombinerInfo &DCI) const {
2876 EVT VT = N->getValueType(0);
2878 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
2879 return SDValue();
2881 SelectionDAG &DAG = DCI.DAG;
2882 SDLoc DL(N);
2884 SDValue N0 = N->getOperand(0);
2885 SDValue N1 = N->getOperand(1);
2887 if (!isU24(N0, DAG) || !isU24(N1, DAG))
2888 return SDValue();
2890 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
2891 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
2893 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
2894 DCI.AddToWorklist(Mulhi.getNode());
2895 return DAG.getZExtOrTrunc(Mulhi, DL, VT);
2898 SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
2899 SDNode *N, DAGCombinerInfo &DCI) const {
2900 SelectionDAG &DAG = DCI.DAG;
2902 // Simplify demanded bits before splitting into multiple users.
2903 if (simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI))
2904 return SDValue();
2906 SDValue N0 = N->getOperand(0);
2907 SDValue N1 = N->getOperand(1);
2909 bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);
2911 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
2912 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
2914 SDLoc SL(N);
2916 SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
2917 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
2918 return DAG.getMergeValues({ MulLo, MulHi }, SL);
2921 static bool isNegativeOne(SDValue Val) {
2922 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
2923 return C->isAllOnesValue();
2924 return false;
2925 }
2927 static bool isCtlzOpc(unsigned Opc) {
2928 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2931 SDValue AMDGPUTargetLowering::getFFBH_U32(SelectionDAG &DAG,
2932 SDValue Op,
2933 const SDLoc &DL) const {
2934 EVT VT = Op.getValueType();
2935 EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
2936 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
2937 LegalVT != MVT::i16))
2938 return SDValue();
2940 if (VT != MVT::i32)
2941 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
2943 SDValue FFBH = DAG.getNode(AMDGPUISD::FFBH_U32, DL, MVT::i32, Op);
2944 if (VT != MVT::i32)
2945 FFBH = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBH);
2947 return FFBH;
2948 }
2950 // The native instructions return -1 on 0 input. Optimize out a select that
2951 // produces -1 on 0.
2953 // TODO: If zero is not undef, we could also do this if the output is compared
2954 // against the bitwidth.
2956 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
2957 SDValue AMDGPUTargetLowering::performCtlzCombine(const SDLoc &SL, SDValue Cond,
2958 SDValue LHS, SDValue RHS,
2959 DAGCombinerInfo &DCI) const {
2960 ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
2961 if (!CmpRhs || !CmpRhs->isNullValue())
2962 return SDValue();
2964 SelectionDAG &DAG = DCI.DAG;
2965 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2966 SDValue CmpLHS = Cond.getOperand(0);
2968 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
2969 if (CCOpcode == ISD::SETEQ &&
2970 isCtlzOpc(RHS.getOpcode()) &&
2971 RHS.getOperand(0) == CmpLHS &&
2972 isNegativeOne(LHS)) {
2973 return getFFBH_U32(DAG, CmpLHS, SL);
2974 }
2976 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
2977 if (CCOpcode == ISD::SETNE &&
2978 isCtlzOpc(LHS.getOpcode()) &&
2979 LHS.getOperand(0) == CmpLHS &&
2980 isNegativeOne(RHS)) {
2981 return getFFBH_U32(DAG, CmpLHS, SL);
2982 }
2984 return SDValue();
2985 }
2987 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
2988 unsigned Op,
2989 const SDLoc &SL,
2990 SDValue Cond,
2991 SDValue N1,
2992 SDValue N2) {
2993 SelectionDAG &DAG = DCI.DAG;
2994 EVT VT = N1.getValueType();
2996 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
2997 N1.getOperand(0), N2.getOperand(0));
2998 DCI.AddToWorklist(NewSelect.getNode());
2999 return DAG.getNode(Op, SL, VT, NewSelect);
3002 // Pull a free FP operation out of a select so it may fold into uses.
3004 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3005 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3007 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3008 // select c, (fabs x), +k -> fabs (select c, x, k)
3009 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3010 SDValue N) {
3011 SelectionDAG &DAG = DCI.DAG;
3012 SDValue Cond = N.getOperand(0);
3013 SDValue LHS = N.getOperand(1);
3014 SDValue RHS = N.getOperand(2);
3016 EVT VT = N.getValueType();
3017 if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3018 (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3019 return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3020 SDLoc(N), Cond, LHS, RHS);
3021 }
3023 bool Inv = false;
3024 if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3025 std::swap(LHS, RHS);
3026 Inv = true;
3027 }
3029 // TODO: Support vector constants.
3030 ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3031 if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
3032 SDLoc SL(N);
3033 // If one side is an fneg/fabs and the other is a constant, we can push the
3034 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3035 SDValue NewLHS = LHS.getOperand(0);
3036 SDValue NewRHS = RHS;
3038 // Careful: if the neg can be folded up, don't try to pull it back down.
3039 bool ShouldFoldNeg = true;
3041 if (NewLHS.hasOneUse()) {
3042 unsigned Opc = NewLHS.getOpcode();
3043 if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
3044 ShouldFoldNeg = false;
3045 if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3046 ShouldFoldNeg = false;
3047 }
3049 if (ShouldFoldNeg) {
3050 if (LHS.getOpcode() == ISD::FNEG)
3051 NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3052 else if (CRHS->isNegative())
3053 return SDValue();
3055 if (Inv)
3056 std::swap(NewLHS, NewRHS);
3058 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3059 Cond, NewLHS, NewRHS);
3060 DCI.AddToWorklist(NewSelect.getNode());
3061 return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3062 }
3063 }
3065 return SDValue();
3066 }
3069 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3070 DAGCombinerInfo &DCI) const {
3071 if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3072 return Folded;
3074 SDValue Cond = N->getOperand(0);
3075 if (Cond.getOpcode() != ISD::SETCC)
3076 return SDValue();
3078 EVT VT = N->getValueType(0);
3079 SDValue LHS = Cond.getOperand(0);
3080 SDValue RHS = Cond.getOperand(1);
3081 SDValue CC = Cond.getOperand(2);
3083 SDValue True = N->getOperand(1);
3084 SDValue False = N->getOperand(2);
3086 if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3087 SelectionDAG &DAG = DCI.DAG;
3088 if (DAG.isConstantValueOfAnyType(True) &&
3089 !DAG.isConstantValueOfAnyType(False)) {
3092 // Swap cmp + select pair to move constant to false input.
3093 // This will allow using VOPC cndmasks more often.
3094 // select (setcc x, y, cc), k, x -> select (setcc x, y, !cc), x, k
3096 SDLoc SL(N);
3097 ISD::CondCode NewCC = getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
3098 LHS.getValueType().isInteger());
3100 SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3101 return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
3102 }
3104 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
3105 SDValue MinMax
3106 = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
3107 // Revisit this node so we can catch min3/max3/med3 patterns.
3108 //DCI.AddToWorklist(MinMax.getNode());
3109 return MinMax;
3110 }
3111 }
3113 // There's no reason to not do this if the condition has other uses.
3114 return performCtlzCombine(SDLoc(N), Cond, True, False, DCI);
3117 static bool isConstantFPZero(SDValue N) {
3118 if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
3119 return C->isZero() && !C->isNegative();
3120 return false;
3121 }
3123 static unsigned inverseMinMax(unsigned Opc) {
3124 switch (Opc) {
3125 case ISD::FMAXNUM:
3126 return ISD::FMINNUM;
3127 case ISD::FMINNUM:
3128 return ISD::FMAXNUM;
3129 case AMDGPUISD::FMAX_LEGACY:
3130 return AMDGPUISD::FMIN_LEGACY;
3131 case AMDGPUISD::FMIN_LEGACY:
3132 return AMDGPUISD::FMAX_LEGACY;
3133 default:
3134 llvm_unreachable("invalid min/max opcode");
3135 }
3136 }
3138 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
3139 DAGCombinerInfo &DCI) const {
3140 SelectionDAG &DAG = DCI.DAG;
3141 SDValue N0 = N->getOperand(0);
3142 EVT VT = N->getValueType(0);
3144 unsigned Opc = N0.getOpcode();
3146 // If the input has multiple uses and we can either fold the negate down, or
3147 // the other uses cannot, give up. This both prevents unprofitable
3148 // transformations and infinite loops: we won't repeatedly try to fold around
3149 // a negate that has no 'good' form.
3150 if (N0.hasOneUse()) {
3151 // This may be able to fold into the source, but at a code size cost. Don't
3152 // fold if the fold into the user is free.
3153 if (allUsesHaveSourceMods(N, 0))
3154 return SDValue();
3155 } else {
3156 if (fnegFoldsIntoOp(Opc) &&
3157 (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
3158 return SDValue();
3159 }
3161 SDLoc SL(N);
3162 switch (Opc) {
3163 case ISD::FADD: {
3164 if (!mayIgnoreSignedZero(N0))
3165 return SDValue();
3167 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
3168 SDValue LHS = N0.getOperand(0);
3169 SDValue RHS = N0.getOperand(1);
3171 if (LHS.getOpcode() != ISD::FNEG)
3172 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3173 else
3174 LHS = LHS.getOperand(0);
3176 if (RHS.getOpcode() != ISD::FNEG)
3177 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3178 else
3179 RHS = RHS.getOperand(0);
3181 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
3182 if (!N0.hasOneUse())
3183 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3184 return Res;
3185 }
3186 case ISD::FMUL:
3187 case AMDGPUISD::FMUL_LEGACY: {
3188 // (fneg (fmul x, y)) -> (fmul x, (fneg y))
3189 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
3190 SDValue LHS = N0.getOperand(0);
3191 SDValue RHS = N0.getOperand(1);
3193 if (LHS.getOpcode() == ISD::FNEG)
3194 LHS = LHS.getOperand(0);
3195 else if (RHS.getOpcode() == ISD::FNEG)
3196 RHS = RHS.getOperand(0);
3197 else
3198 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3200 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
3201 if (!N0.hasOneUse())
3202 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3203 return Res;
3204 }
3205 case ISD::FMA:
3206 case ISD::FMAD: {
3207 if (!mayIgnoreSignedZero(N0))
3208 return SDValue();
3210 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
3211 SDValue LHS = N0.getOperand(0);
3212 SDValue MHS = N0.getOperand(1);
3213 SDValue RHS = N0.getOperand(2);
3215 if (LHS.getOpcode() == ISD::FNEG)
3216 LHS = LHS.getOperand(0);
3217 else if (MHS.getOpcode() == ISD::FNEG)
3218 MHS = MHS.getOperand(0);
3219 else
3220 MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
3222 if (RHS.getOpcode() != ISD::FNEG)
3223 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3224 else
3225 RHS = RHS.getOperand(0);
3227 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
3228 if (!N0.hasOneUse())
3229 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3230 return Res;
3231 }
3232 case ISD::FMAXNUM:
3233 case ISD::FMINNUM:
3234 case AMDGPUISD::FMAX_LEGACY:
3235 case AMDGPUISD::FMIN_LEGACY: {
3236 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
3237 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
3238 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
3239 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
3241 SDValue LHS = N0.getOperand(0);
3242 SDValue RHS = N0.getOperand(1);
3244 // 0 doesn't have a negated inline immediate.
3245 // TODO: Shouldn't fold 1/2pi either, and should be generalized to other
3246 // constants.
3247 if (isConstantFPZero(RHS))
3248 return SDValue();
3250 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
3251 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3252 unsigned Opposite = inverseMinMax(Opc);
3254 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
3255 if (!N0.hasOneUse())
3256 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
3257 return Res;
3258 }
3259 case ISD::FP_EXTEND:
3260 case ISD::FTRUNC:
3261 case ISD::FRINT:
3262 case ISD::FNEARBYINT: // XXX - Should fround be handled?
3263 case ISD::FSIN:
3264 case AMDGPUISD::RCP:
3265 case AMDGPUISD::RCP_LEGACY:
3266 case AMDGPUISD::SIN_HW: {
3267 SDValue CvtSrc = N0.getOperand(0);
3268 if (CvtSrc.getOpcode() == ISD::FNEG) {
3269 // (fneg (fp_extend (fneg x))) -> (fp_extend x)
3270 // (fneg (rcp (fneg x))) -> (rcp x)
3271 return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
3272 }
3274 if (!N0.hasOneUse())
3275 return SDValue();
3277 // (fneg (fp_extend x)) -> (fp_extend (fneg x))
3278 // (fneg (rcp x)) -> (rcp (fneg x))
3279 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3280 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
3281 }
3282 case ISD::FP_ROUND: {
3283 SDValue CvtSrc = N0.getOperand(0);
3285 if (CvtSrc.getOpcode() == ISD::FNEG) {
3286 // (fneg (fp_round (fneg x))) -> (fp_round x)
3287 return DAG.getNode(ISD::FP_ROUND, SL, VT,
3288 CvtSrc.getOperand(0), N0.getOperand(1));
3289 }
3291 if (!N0.hasOneUse())
3292 return SDValue();
3294 // (fneg (fp_round x)) -> (fp_round (fneg x))
3295 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
3296 return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
3297 }
3298 case ISD::FP16_TO_FP: {
3299 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
3300 // f16, but legalization of f16 fneg ends up pulling it out of the source.
3301 // Put the fneg back as a legal source operation that can be matched later.
3304 SDValue Src = N0.getOperand(0);
3305 EVT SrcVT = Src.getValueType();
3307 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
3308 SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
3309 DAG.getConstant(0x8000, SL, SrcVT));
3310 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
3311 }
3312 default:
3313 return SDValue();
3314 }
3315 }
3317 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
3318 DAGCombinerInfo &DCI) const {
3319 SelectionDAG &DAG = DCI.DAG;
3320 SDValue N0 = N->getOperand(0);
3322 if (!N0.hasOneUse())
3323 return SDValue();
3325 switch (N0.getOpcode()) {
3326 case ISD::FP16_TO_FP: {
3327 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
3328 SDLoc SL(N);
3329 SDValue Src = N0.getOperand(0);
3330 EVT SrcVT = Src.getValueType();
3332 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
3333 SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
3334 DAG.getConstant(0x7fff, SL, SrcVT));
3335 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
3336 }
3337 default:
3338 return SDValue();
3339 }
3340 }
3342 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
3343 DAGCombinerInfo &DCI) const {
3344 SelectionDAG &DAG = DCI.DAG;
3345 SDLoc DL(N);
3347 switch(N->getOpcode()) {
3348 default:
3349 break;
3350 case ISD::BITCAST: {
3351 EVT DestVT = N->getValueType(0);
3353 // Push casts through vector builds. This helps avoid emitting a large
3354 // number of copies when materializing floating point vector constants.
3356 // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
3357 // vnt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
3358 if (DestVT.isVector()) {
3359 SDValue Src = N->getOperand(0);
3360 if (Src.getOpcode() == ISD::BUILD_VECTOR) {
3361 EVT SrcVT = Src.getValueType();
3362 unsigned NElts = DestVT.getVectorNumElements();
3364 if (SrcVT.getVectorNumElements() == NElts) {
3365 EVT DestEltVT = DestVT.getVectorElementType();
3367 SmallVector<SDValue, 8> CastedElts;
3368 SDLoc SL(N);
3369 for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
3370 SDValue Elt = Src.getOperand(I);
3371 CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
3372 }
3374 return DAG.getBuildVector(DestVT, SL, CastedElts);
3375 }
3376 }
3377 }
3379 if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
3380 break;
3382 // Fold bitcasts of constants.
3384 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
3385 // TODO: Generalize and move to DAGCombiner
3386 SDValue Src = N->getOperand(0);
3387 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
3388 assert(Src.getValueType() == MVT::i64);
3389 SDLoc SL(N);
3390 uint64_t CVal = C->getZExtValue();
3391 return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT,
3392 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3393 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3394 }
3396 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
3397 const APInt &Val = C->getValueAPF().bitcastToAPInt();
3398 SDLoc SL(N);
3399 uint64_t CVal = Val.getZExtValue();
3400 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3401 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
3402 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
3404 return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
3405 }
3407 break;
3408 }
3409 case ISD::SHL: {
3410 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3411 break;
3413 return performShlCombine(N, DCI);
3414 }
3415 case ISD::SRL: {
3416 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3417 break;
3419 return performSrlCombine(N, DCI);
3420 }
3421 case ISD::SRA: {
3422 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3423 break;
3425 return performSraCombine(N, DCI);
3426 }
3427 case ISD::MUL:
3428 return performMulCombine(N, DCI);
3429 case ISD::MULHS:
3430 return performMulhsCombine(N, DCI);
3431 case ISD::MULHU:
3432 return performMulhuCombine(N, DCI);
3433 case AMDGPUISD::MUL_I24:
3434 case AMDGPUISD::MUL_U24:
3435 case AMDGPUISD::MULHI_I24:
3436 case AMDGPUISD::MULHI_U24: {
3437 // If the first call to simplify is successful, then N may end up being
3438 // deleted, so we shouldn't call simplifyI24 again.
3439 simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI);
3440 return SDValue();
3441 }
3442 case AMDGPUISD::MUL_LOHI_I24:
3443 case AMDGPUISD::MUL_LOHI_U24:
3444 return performMulLoHi24Combine(N, DCI);
3445 case ISD::SELECT:
3446 return performSelectCombine(N, DCI);
3447 case ISD::FNEG:
3448 return performFNegCombine(N, DCI);
3449 case ISD::FABS:
3450 return performFAbsCombine(N, DCI);
3451 case AMDGPUISD::BFE_I32:
3452 case AMDGPUISD::BFE_U32: {
3453 assert(!N->getValueType(0).isVector() &&
3454 "Vector handling of BFE not implemented");
3455 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
3456 if (!Width)
3457 break;
3459 uint32_t WidthVal = Width->getZExtValue() & 0x1f;
3460 if (WidthVal == 0)
3461 return DAG.getConstant(0, DL, MVT::i32);
3463 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
3464 if (!Offset)
3465 break;
3467 SDValue BitsFrom = N->getOperand(0);
3468 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
3470 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
3472 if (OffsetVal == 0) {
3473 // This is already sign / zero extended, so try to fold away extra BFEs.
3474 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
3476 unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
3477 if (OpSignBits >= SignBits)
3478 return BitsFrom;
3480 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
3481 if (Signed) {
3482 // This is a sign_extend_inreg. Replace it to take advantage of existing
3483 // DAG Combines. If not eliminated, we will match back to BFE during
3484 // selection.
3486 // TODO: The sext_inreg of extended types ends, although we could
3487 // handle them in a single BFE.
3488 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
3489 DAG.getValueType(SmallVT));
3490 }
3492 return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
3493 }

    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        CVal->getSExtValue(),
                                        OffsetVal,
                                        WidthVal,
                                        DL);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       CVal->getZExtValue(),
                                       OffsetVal,
                                       WidthVal,
                                       DL);
    }

    if ((OffsetVal + WidthVal) >= 32 &&
        !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }
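
    // When the field reaches bit 31 the BFE degenerates to a single shift;
    // e.g. offset 24 and width 8 cover bits [31:24] and become a shift by 24.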

    if (BitsFrom.hasOneUse()) {
      APInt Demanded = APInt::getBitsSet(32,
                                         OffsetVal,
                                         OffsetVal + WidthVal);

      KnownBits Known;
      TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                            !DCI.isBeforeLegalizeOps());
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
          TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);
      }
    }

    break;
  }
  case ISD::LOAD:
    return performLoadCombine(N, DCI);
  case ISD::STORE:
    return performStoreCombine(N, DCI);
  case AMDGPUISD::CLAMP:
    return performClampCombine(N, DCI);
  case AMDGPUISD::RCP: {
    if (const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) {
      // XXX - Should this flush denormals?
      const APFloat &Val = CFP->getValueAPF();
      APFloat One(Val.getSemantics(), "1.0");
      return DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
    }
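
    // e.g. an (AMDGPUISD::RCP (f32 2.0)) node constant-folds to 0.5 here.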

    break;
  }
  case ISD::AssertZext:
  case ISD::AssertSext:
    return performAssertSZExtCombine(N, DCI);
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
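
// Lower an incoming physical register into a live-in virtual register,
// registering the live-in on first use. When \p RawReg is set, the raw
// virtual register is returned instead of a copy from the entry node.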
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT,
                                                   const SDLoc &SL,
                                                   bool RawReg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VReg;

  if (!MRI.isLiveIn(Reg)) {
    VReg = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VReg);
  } else {
    VReg = MRI.getLiveInVirtReg(Reg);
  }

  if (RawReg)
    return DAG.getRegister(VReg, VT);

  return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
}

uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
    const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const {
  unsigned Alignment = Subtarget->getAlignmentForImplicitArgPtr();
  uint64_t ArgOffset = alignTo(MFI->getABIArgOffset(), Alignment);
  switch (Param) {
  case GRID_DIM:
    return ArgOffset;
  case GRID_OFFSET:
    return ArgOffset + 4;
  }
  llvm_unreachable("unexpected implicit parameter type");
}
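
// e.g. with an ABI arg offset of 36 bytes and 8 byte implicit-arg alignment,
// GRID_DIM is read at offset 40 and GRID_OFFSET at offset 44.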

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((AMDGPUISD::NodeType)Opcode) {
  case AMDGPUISD::FIRST_NUMBER: break;
  // AMDIL DAG nodes
  NODE_NAME_CASE(UMUL)
  NODE_NAME_CASE(BRANCH_COND)

  // AMDGPU DAG nodes
  NODE_NAME_CASE(IF)
  NODE_NAME_CASE(ELSE)
  NODE_NAME_CASE(LOOP)
  NODE_NAME_CASE(CALL)
  NODE_NAME_CASE(TRAP)
  NODE_NAME_CASE(RET_FLAG)
  NODE_NAME_CASE(RETURN_TO_EPILOG)
  NODE_NAME_CASE(ENDPGM)
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(SETCC)
  NODE_NAME_CASE(SETREG)
  NODE_NAME_CASE(FMA_W_CHAIN)
  NODE_NAME_CASE(FMUL_W_CHAIN)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(COS_HW)
  NODE_NAME_CASE(SIN_HW)
  NODE_NAME_CASE(FMAX_LEGACY)
  NODE_NAME_CASE(FMIN_LEGACY)
  NODE_NAME_CASE(FMAX3)
  NODE_NAME_CASE(SMAX3)
  NODE_NAME_CASE(UMAX3)
  NODE_NAME_CASE(FMIN3)
  NODE_NAME_CASE(SMIN3)
  NODE_NAME_CASE(UMIN3)
  NODE_NAME_CASE(FMED3)
  NODE_NAME_CASE(SMED3)
  NODE_NAME_CASE(UMED3)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(FMAD_FTZ)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RCP)
  NODE_NAME_CASE(RSQ)
  NODE_NAME_CASE(RCP_LEGACY)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(FMUL_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMP)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(FP_CLASS)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(CARRY)
  NODE_NAME_CASE(BORROW)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(FFBH_U32)
  NODE_NAME_CASE(FFBH_I32)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MULHI_U24)
  NODE_NAME_CASE(MULHI_I24)
  NODE_NAME_CASE(MUL_LOHI_U24)
  NODE_NAME_CASE(MUL_LOHI_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(TEXTURE_FETCH)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(EXPORT_DONE)
  NODE_NAME_CASE(R600_EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
  NODE_NAME_CASE(FP_TO_FP16)
  NODE_NAME_CASE(FP16_ZEXT)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  NODE_NAME_CASE(PC_ADD_REL_OFFSET)
  NODE_NAME_CASE(KILL)
  NODE_NAME_CASE(DUMMY_CHAIN)
  case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
  NODE_NAME_CASE(INIT_EXEC)
  NODE_NAME_CASE(INIT_EXEC_FROM_INPUT)
  NODE_NAME_CASE(SENDMSG)
  NODE_NAME_CASE(SENDMSGHALT)
  NODE_NAME_CASE(INTERP_MOV)
  NODE_NAME_CASE(INTERP_P1)
  NODE_NAME_CASE(INTERP_P2)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT_X3)
  NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
  NODE_NAME_CASE(ATOMIC_CMP_SWAP)
  NODE_NAME_CASE(ATOMIC_INC)
  NODE_NAME_CASE(ATOMIC_DEC)
  NODE_NAME_CASE(BUFFER_LOAD)
  NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
  }
  return nullptr;
}

SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
                                              SelectionDAG &DAG, int Enabled,
                                              int &RefinementSteps,
                                              bool &UseOneConstNR,
                                              bool Reciprocal) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rsq instruction, but the documentation is less
  // clear on its precision.
  return SDValue();
}

SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
                                               SelectionDAG &DAG, int Enabled,
                                               int &RefinementSteps) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson step performed with two fused multiply-adds (FMAs).
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rcp instruction, but the documentation is less
  // clear on its precision.
  return SDValue();
}
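
// For reference, one Newton-Raphson step for r ~ 1/x takes exactly two FMAs:
//   e = fma(-x, r, 1.0);  // residual 1 - x * r
//   r = fma(e, r, r);     // refined estimate r * (2 - x * r)
// The f32 path above requests RefinementSteps = 0 because the hardware RCP
// already meets the < 1 ulp bound on its own.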

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known,
    const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
  Known.resetAll(); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
    Known.Zero = APInt::getHighBitsSet(32, 31);
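    // Only bit 0 can be set: the carry / borrow out of a 32-bit add or
    // subtract is always 0 or 1.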
    break;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    uint32_t Width = CWidth->getZExtValue() & 0x1f;

    if (Opc == AMDGPUISD::BFE_U32)
      Known.Zero = APInt::getHighBitsSet(32, 32 - Width);

    break;
  }
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT: {
    unsigned BitWidth = Known.getBitWidth();

    // High bits are zero.
    Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
    break;
  }
  }
}

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }
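
  // A BFE_I32 at offset 0 of width W sign-extends a W-bit field, so at least
  // 32 - W + 1 bits match the sign bit; e.g. width 24 gives at least 9, the
  // same as a sext from i24.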

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
    return 31;
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT: