//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

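// The constructor below registers the register class backing each legal MVT,
// then declares per-opcode legalization actions (Legal, Custom, Promote,
// Expand) that drive SelectionDAG legalization for SI.
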
SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  if (Subtarget->hasVOP3PInsts()) {
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector loads and stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
        MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo that
  // is expanded to avoid having two separate loops in case the index is a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // so let LLVM add the comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);

    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns).
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    Info.vol = !Vol || !Vol->isZero();
    Info.readMem = true;
    Info.writeMem = true;
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and is treated as a 12-bit unsigned offset.

  // Just r + i?
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

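// For example, on a GFX9 subtarget with flat instruction offsets, r + 4088 is
// accepted here (4088 fits in 12 unsigned bits), while r + 4096 is rejected
// since 4096 does not fit.
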
bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r
    // or 2 * r + i as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

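// For example, {BaseOffs = 16, Scale = 1, HasBaseReg = true} is accepted as
// r + r + 16, while any Scale of 2 combined with a base register is rejected
// because 2 * r + r cannot be encoded.
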
bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS, Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume that we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffer < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  }

  llvm_unreachable("unhandled address space");
}

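// Example: a constant-space access with BaseOffs = 1020 is legal on SI since
// 1020 / 4 = 255 fits the 8-bit dword offset, but BaseOffs = 1024 is not;
// on VI the same 1024-byte offset is legal because it fits the 20-bit byte
// offset field.
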
bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

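// E.g. merging stores up to v4i32 (128 bits) is allowed for global and flat
// addresses, while LDS stores stop at 64 bits, matching ds_write_b64.
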
bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword values must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

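// E.g. for a 32-byte memcpy with a 4-byte-aligned destination this hints
// v4i32, so the expansion uses 16-byte accesses instead of the smaller
// default type.
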
static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
  return AS == AMDGPUASI.GLOBAL_ADDRESS ||
         AS == AMDGPUASI.FLAT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
         isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

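// The "amdgpu.noclobber" metadata is placed by the AMDGPUAnnotateUniformValues
// IR pass on loads it can prove are not clobbered between kernel entry and the
// load, which allows such global loads to be selected as scalar (SMRD)
// accesses.
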
bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is a no-op.
  if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPU::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

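// E.g. v4i16 is split into i16 scalars rather than widened to v4i32; v2i16
// and v2f16 skip this path on VOP3P subtargets, where they were already
// registered above as legal types.
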
bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  unsigned InputPtrReg = TRI->getPreloadedValue(MF,
                                                SIRegisterInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}

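// Callers pass the byte offset of a specific kernel argument; the result is
// KERNARG_SEGMENT_PTR + Offset as a constant-address-space pointer.
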
SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Val,
                                         bool Signed,
                                         const ISD::InputArg *Arg) const {
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return Val;
}

SDValue SITargetLowering::lowerKernargMemParameter(
  SelectionDAG &DAG, EVT VT, EVT MemVT,
  const SDLoc &SL, SDValue Chain,
  uint64_t Offset, bool Signed,
  const ISD::InputArg *Arg) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MONonTemporal |
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}

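// Kernel arguments are constant for the lifetime of a dispatch, so the load is
// tagged dereferenceable and invariant, allowing later passes to freely CSE
// and hoist it.
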
SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                                              const SDLoc &SL, SDValue Chain,
                                              const ISD::InputArg &Arg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (Arg.Flags.isByVal()) {
    unsigned Size = Arg.Flags.getByValSize();
    int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
    return DAG.getFrameIndex(FrameIdx, MVT::i32);
  }

  unsigned ArgOffset = VA.getLocMemOffset();
  unsigned ArgSize = VA.getValVT().getStoreSize();

  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);

  // Create load nodes to retrieve arguments from the stack.
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue ArgValue;

  // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  MVT MemVT = VA.getValVT();

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::BCvt:
    MemVT = VA.getLocVT();
    break;
  case CCValAssign::SExt:
    ExtType = ISD::SEXTLOAD;
    break;
  case CCValAssign::ZExt:
    ExtType = ISD::ZEXTLOAD;
    break;
  case CCValAssign::AExt:
    ExtType = ISD::EXTLOAD;
    break;
  }

  ArgValue = DAG.getExtLoad(
      ExtType, SL, VA.getLocVT(), Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
      MemVT);
  return ArgValue;
}

static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                                   CallingConv::ID CallConv,
                                   ArrayRef<ISD::InputArg> Ins,
                                   BitVector &Skipped,
                                   FunctionType *FType,
                                   SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg &Arg = Ins[I];

    // First, check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(I);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    // Second, split vectors into their elements.
    if (Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned J = 0; J != NumElements; ++J) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }
    } else {
      Splits.push_back(Arg);
    }
  }
}

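// E.g. a used <3 x float> PS input becomes three f32 InputArgs with increasing
// PartOffset values, consuming exactly three VGPRs rather than a rounded-up
// four.
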
// Allocate special inputs passed in VGPRs.
static void allocateSpecialInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkItemIDY()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkItemIDZ()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  if (Info.hasImplicitBufferPtr()) {
    unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

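// Note that the allocation order above is what fixes which user SGPRs each
// enabled input occupies; it mirrors the kernel ABI layout, so reordering
// these checks would change the ABI.
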
// Allocate special input registers that are initialized per-wave.
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasWorkGroupIDX()) {
    unsigned Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    unsigned Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    unsigned Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    unsigned Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

static void reservePrivateMemoryRegs(const TargetMachine &TM,
                                     MachineFunction &MF,
                                     const SIRegisterInfo &TRI,
                                     SIMachineFunctionInfo &Info) {
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasStackObjects = MFI.hasStackObjects();

  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info.setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (TM.getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.isAmdCodeObjectV2(MF)) {
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = TRI.getPreloadedValue(
          MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info.setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI.getPreloadedValue(
          MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI.reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue copies will be inserted from the argument
      // to these reserved registers.
      Info.setScratchRSrcReg(ReservedBufferReg);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info.setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects) {
      unsigned ScratchWaveOffsetReg = TRI.getPreloadedValue(
          MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }
}

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(
        *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting debugger prologue if
  // "amdgpu-debugger-emit-prologue" attribute was specified.
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  SmallVector<CCValAssign, 16> ArgLocs;
  BitVector Skipped(Ins.size());
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  bool IsShader = AMDGPU::isShader(CallConv);
  bool IsKernel = AMDGPU::isKernel(CallConv);
  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);

  if (IsShader) {
    processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);

    // At least one interpolation mode must be enabled or else the GPU will
    // hang.
    //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after the compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, we shouldn't do anything here and the user should take
    // responsibility for the correct programming.
    //
    // Otherwise, the following restrictions apply:
    // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
    // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
    //   enabled too.
    if (CallConv == CallingConv::AMDGPU_PS &&
        ((Info->getPSInputAddr() & 0x7F) == 0 ||
         ((Info->getPSInputAddr() & 0xF) == 0 &&
          Info->isPSInputAllocated(11)))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    assert(!Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  } else if (IsKernel) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    Splits.append(Ins.begin(), Ins.end());
  }

  if (IsEntryFunc) {
    allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
    allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
  }

  if (IsKernel) {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  } else {
    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
    CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
  }

  SmallVector<SDValue, 16> Chains;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (IsEntryFunc && VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();

      const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
                              VA.getLocMemOffset();
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());

      // The first 36 bytes of the input buffer contain information about
      // thread group and global sizes.
      SDValue Arg = lowerKernargMemParameter(
          DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
          dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      continue;
    } else if (!IsEntryFunc && VA.isMemLoc()) {
      SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
      InVals.push_back(Val);
      if (!Arg.Flags.isByVal())
        Chains.push_back(Val.getValue(1));
      continue;
    }

    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    EVT ValVT = VA.getValVT();

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    // If this is an 8 or 16-bit value, it is really passed promoted
    // to 32 bits. Insert an assert[sz]ext to capture this, then
    // truncate to the right size.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, DL, VT, Val,
                        DAG.getValueType(ValVT));
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, DL, VT, Val,
                        DAG.getValueType(ValVT));
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    if (IsShader && Arg.VT.isVector()) {
      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());
  }

  return Chains.empty() ? Chain :
      DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// TODO: If return values can't fit in registers, we should return as many as
// possible in registers before passing on stack.
bool SITargetLowering::CanLowerReturn(
  CallingConv::ID CallConv,
  MachineFunction &MF, bool IsVarArg,
  const SmallVectorImpl<ISD::OutputArg> &Outs,
  LLVMContext &Context) const {
  // Replacing returns with sret/stack usage doesn't make sense for shaders.
  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
  // for shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}

SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (AMDGPU::isKernel(CallConv)) {
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);
  }

  bool IsShader = AMDGPU::isShader(CallConv);

  Info->setIfReturnsVoid(Outs.size() == 0);
  bool IsWaveEnd = Info->returnsVoid() && IsShader;

  SmallVector<ISD::OutputArg, 48> Splits;
  SmallVector<SDValue, 48> SplitVals;

  // Split vectors into their elements.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    const ISD::OutputArg &Out = Outs[i];

    if (IsShader && Out.VT.isVector()) {
      MVT VT = Out.VT.getVectorElementType();
      ISD::OutputArg NewOut = Out;
      NewOut.Flags.setSplit();
      NewOut.VT = VT;

      // We want the original number of vector elements here, e.g.
      // three or five, not four or eight.
      unsigned NumElements = Out.ArgVT.getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
                                   DAG.getConstant(j, DL, MVT::i32));
        SplitVals.push_back(Elem);
        Splits.push_back(NewOut);
        NewOut.PartOffset += NewOut.VT.getStoreSize();
      }
    } else {
      SplitVals.push_back(OutVals[i]);
      Splits.push_back(Out);
    }
  }

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Add return address for callable functions.
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
    SDValue ReturnAddrReg = CreateLiveInRegister(
        DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);

    // FIXME: Should be able to use a vreg here, but need a way to prevent it
    // from being allocated to a CSR.

    SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
                                                MVT::i64);

    Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
    Flag = Chain.getValue(1);

    RetOps.push_back(PhysReturnAddrReg);
  }

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    // TODO: Partially return in registers if return values don't fit.

    SDValue Arg = SplitVals[realRVLocIdx];

    // Copied from other backends.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // FIXME: Does sret work properly?

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  unsigned Opc = AMDGPUISD::ENDPGM;
  if (!IsWaveEnd)
    Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                             SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("m0", AMDGPU::M0)
    .Case("exec", AMDGPU::EXEC)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Default(AMDGPU::NoRegister);

  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));
  }

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
      Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
  }

  switch (Reg) {
  case AMDGPU::M0:
  case AMDGPU::EXEC_LO:
  case AMDGPU::EXEC_HI:
  case AMDGPU::FLAT_SCR_LO:
  case AMDGPU::FLAT_SCR_HI:
    if (VT.getSizeInBits() == 32)
      return Reg;
    break;
  case AMDGPU::EXEC:
  case AMDGPU::FLAT_SCR:
    if (VT.getSizeInBits() == 64)
      return Reg;
    break;
  default:
    llvm_unreachable("missing register type checking");
  }

  report_fatal_error(Twine("invalid type for register \""
                           + StringRef(RegName) + "\"."));
}

// If kill is not the last instruction, split the block so kill is always a
// proper terminator.
MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
                                                    MachineBasicBlock *BB) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineBasicBlock::iterator SplitPoint(&MI);
  ++SplitPoint;

  if (SplitPoint == BB->end()) {
    // Don't bother with a new block.
    MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineBasicBlock *SplitBB
    = MF->CreateMachineBasicBlock(BB->getBasicBlock());

  MF->insert(++MachineFunction::iterator(BB), SplitBB);
  SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());

  SplitBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(SplitBB);

  MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
  return SplitBB;
}

// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
// wavefront. If the value is uniform and just happens to be in a VGPR, this
// will only do one iteration. In the worst case, this will loop 64 times.
//
// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
  const SIInstrInfo *TII,
  MachineRegisterInfo &MRI,
  MachineBasicBlock &OrigBB,
  MachineBasicBlock &LoopBB,
  const DebugLoc &DL,
  const MachineOperand &IdxReg,
  unsigned InitReg,
  unsigned ResultReg,
  unsigned PhiReg,
  unsigned InitSaveExecReg,
  int Offset,
  bool UseGPRIdxMode) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
    .addReg(InitReg)
    .addMBB(&OrigBB)
    .addReg(ResultReg)
    .addMBB(&LoopBB);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
    .addReg(InitSaveExecReg)
    .addMBB(&OrigBB)
    .addReg(NewExec)
    .addMBB(&LoopBB);

  // Read the next variant <- also loop target.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Compare the just read index value to all possible Idx values.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
    .addReg(CurrentIdxReg)
    .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());

  if (UseGPRIdxMode) {
    unsigned IdxReg;
    if (Offset == 0) {
      IdxReg = CurrentIdxReg;
    } else {
      IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }

    MachineInstr *SetIdx =
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX))
      .addReg(IdxReg, RegState::Kill);
    SetIdx->getOperand(2).setIsUndef();
  } else {
    // Move the index into M0.
    if (Offset == 0) {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill);
    } else {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }
  }

  // Update EXEC, saving the original EXEC value to NewExec.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
    .addReg(CondReg, RegState::Kill);

  MRI.setSimpleHint(NewExec, CondReg);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  MachineInstr *InsertPt =
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(NewExec);

  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
  // s_cbranch_scc0?

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);

  return InsertPt->getIterator();
}
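// For illustration, the loop built above expands to roughly this sequence
// (a sketch with made-up register names, not the exact emitted code):
//
//   loop:
//     v_readfirstlane_b32 s_cur, v_idx        ; grab one lane's index
//     v_cmp_eq_u32 s[c:c+1], s_cur, v_idx     ; mask of lanes with that index
//     s_mov_b32 m0, s_cur                     ; (or s_set_gpr_idx_idx)
//     s_and_saveexec_b64 s[s:s+1], s[c:c+1]   ; run only the matching lanes
//     ; ...the indirect move is inserted at the returned iterator...
//     s_xor_b64 exec, exec, s[s:s+1]          ; retire the lanes just handled
//     s_cbranch_execnz loop                   ; repeat while lanes remain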
// This has slightly sub-optimal regalloc when the source vector is killed by
// the read. The register allocator does not understand that the kill is
// per-workitem, so the vector is kept alive for the whole loop and we end up
// not re-using a subregister from it, using one more VGPR than necessary.
// (That extra VGPR was saved back when this was expanded after register
// allocation.)
static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
                                                  MachineBasicBlock &MBB,
                                                  MachineInstr &MI,
                                                  unsigned InitResultReg,
                                                  unsigned PhiReg,
                                                  int Offset,
                                                  bool UseGPRIdxMode) {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  MBB.addSuccessor(LoopBB);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
                                      InitResultReg, DstReg, PhiReg, TmpExec,
                                      Offset, UseGPRIdxMode);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(SaveExec);

  return InsPt;
}
// Returns subreg index, offset
static std::pair<unsigned, int>
computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
                            const TargetRegisterClass *SuperRC,
                            unsigned VecReg,
                            int Offset) {
  int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (Offset >= NumElts || Offset < 0)
    return std::make_pair(AMDGPU::sub0, Offset);

  return std::make_pair(AMDGPU::sub0 + Offset, 0);
}
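// For example (illustrative): with a 128-bit register class (4 x 32-bit
// elements), a constant offset of 2 folds entirely into the subregister and
// yields (sub2, 0), while an offset of 5 is out of bounds and is returned
// unchanged as (sub0, 5) to be applied through M0 at run time.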
// Return true if the index is an SGPR and was set.
static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 int Offset,
                                 bool UseGPRIdxMode,
                                 bool IsIndirectSrc) {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());

  assert(Idx->getReg() != AMDGPU::NoRegister);

  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
    return false;

  if (UseGPRIdxMode) {
    unsigned IdxMode = IsIndirectSrc ?
      VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
    if (Offset == 0) {
      MachineInstr *SetOn =
        BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
        .add(*Idx)
        .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    } else {
      unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
        .add(*Idx)
        .addImm(Offset);
      MachineInstr *SetOn =
        BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
        .addReg(Tmp, RegState::Kill)
        .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    }

    return true;
  }

  if (Offset == 0) {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .add(*Idx);
  } else {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .add(*Idx)
      .addImm(Offset);
  }

  return true;
}
// Control flow needs to be inserted if indexing with a VGPR.
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();

  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);

  unsigned SubReg;
  std::tie(SubReg, Offset)
    = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);

  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      // TODO: Look at the uses to avoid the copy. This may require rescheduling
      // to avoid interfering with other uses, so probably requires a new
      // optimization pass.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);

  if (UseGPRIdxMode) {
    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::SRC0_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
  }

  MI.eraseFromParent();
  return LoopBB;
}
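// A typical trigger for this path (illustrative IR, not from this file):
//
//   define amdgpu_ps float @f(<4 x float> %v) {
//     %tid = call i32 @llvm.amdgcn.workitem.id.x()
//     %e = extractelement <4 x float> %v, i32 %tid   ; divergent index
//     ret float %e
//   }
//
// The divergent index lives in a VGPR, so SI_INDIRECT_SRC_V4 is expanded here
// into the readfirstlane waterfall loop instead of a single v_movrels_b32.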
static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
                                 const TargetRegisterClass *VecRC) {
  switch (TRI.getRegSizeInBits(*VecRC)) {
  case 32: // 4 bytes
    return AMDGPU::V_MOVRELD_B32_V1;
  case 64: // 8 bytes
    return AMDGPU::V_MOVRELD_B32_V2;
  case 128: // 16 bytes
    return AMDGPU::V_MOVRELD_B32_V4;
  case 256: // 32 bytes
    return AMDGPU::V_MOVRELD_B32_V8;
  case 512: // 64 bytes
    return AMDGPU::V_MOVRELD_B32_V16;
  default:
    llvm_unreachable("unsupported size for MOVRELD pseudos");
  }
}
static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());

  // This can be an immediate, but will be folded later.
  assert(Val->getReg());

  unsigned SubReg;
  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
                                                         SrcVec->getReg(),
                                                         Offset);
  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  if (Idx->getReg() == AMDGPU::NoRegister) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    assert(Offset == 0);

    BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
        .add(*SrcVec)
        .add(*Val)
        .addImm(SubReg);

    MI.eraseFromParent();
    return &MBB;
  }

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
          .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
          .add(*Val)
          .addReg(Dst, RegState::ImplicitDefine)
          .addReg(SrcVec->getReg(), RegState::Implicit)
          .addReg(AMDGPU::M0, RegState::Implicit);

      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

      BuildMI(MBB, I, DL, MovRelDesc)
          .addReg(Dst, RegState::Define)
          .addReg(SrcVec->getReg())
          .add(*Val)
          .addImm(SubReg - AMDGPU::sub0);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  MRI.clearKillFlags(Val->getReg());

  const DebugLoc &DL = MI.getDebugLoc();

  if (UseGPRIdxMode) {
    MachineBasicBlock::iterator I(&MI);

    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::DST_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  unsigned PhiReg = MRI.createVirtualRegister(VecRC);

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
                              Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
        .addReg(PhiReg, RegState::Undef, SubReg) // vdst
        .add(*Val)
        .addReg(Dst, RegState::ImplicitDefine)
        .addReg(PhiReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

    BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
        .addReg(Dst, RegState::Define)
        .addReg(PhiReg)
        .add(*Val)
        .addImm(SubReg - AMDGPU::sub0);
  }

  MI.eraseFromParent();

  return LoopBB;
}
MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
  MachineInstr &MI, MachineBasicBlock *BB) const {

  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  MachineFunction *MF = BB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  if (TII->isMIMG(MI)) {
    if (!MI.memoperands_empty())
      return BB;

    // Add a memoperand for mimg instructions so that they aren't assumed to
    // be ordered memory instructions.

    MachinePointerInfo PtrInfo(MFI->getImagePSV());
    MachineMemOperand::Flags Flags = MachineMemOperand::MODereferenceable;
    if (MI.mayStore())
      Flags |= MachineMemOperand::MOStore;

    if (MI.mayLoad())
      Flags |= MachineMemOperand::MOLoad;

    auto MMO = MF->getMachineMemOperand(PtrInfo, Flags, 0, 0);
    MI.addMemOperand(*MF, MMO);
    return BB;
  }
  switch (MI.getOpcode()) {
  case AMDGPU::SI_INIT_M0:
    BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .add(MI.getOperand(0));
    MI.eraseFromParent();
    return BB;

  case AMDGPU::SI_INIT_EXEC:
    // This should be before all vector instructions.
    BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
            AMDGPU::EXEC)
        .addImm(MI.getOperand(0).getImm());
    MI.eraseFromParent();
    return BB;
  case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
    // Extract the thread count from an SGPR input and set EXEC accordingly.
    // Since BFM can't shift by 64, handle that case with CMP + CMOV.
    //
    // S_BFE_U32 count, input, {shift, 7}
    // S_BFM_B64 exec, count, 0
    // S_CMP_EQ_U32 count, 64
    // S_CMOV_B64 exec, -1
    MachineInstr *FirstMI = &*BB->begin();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    unsigned InputReg = MI.getOperand(0).getReg();
    unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    bool Found = false;

    // Move the COPY of the input reg to the beginning, so that we can use it.
    for (auto I = BB->begin(); I != &MI; I++) {
      if (I->getOpcode() != TargetOpcode::COPY ||
          I->getOperand(0).getReg() != InputReg)
        continue;

      if (I == FirstMI) {
        FirstMI = &*++BB->begin();
      } else {
        I->removeFromParent();
        BB->insert(FirstMI, &*I);
      }
      Found = true;
      break;
    }
    assert(Found);
    (void)Found;

    // This should be before all vector instructions.
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
        .addReg(InputReg)
        .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
            AMDGPU::EXEC)
        .addReg(CountReg)
        .addImm(0);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
        .addReg(CountReg, RegState::Kill)
        .addImm(64);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
            AMDGPU::EXEC)
        .addImm(-1);
    MI.eraseFromParent();
    return BB;
  }
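  // Explanatory note (not from the original): the S_BFE_U32 source field above
  // is encoded as offset | (width << 16), so (Imm & 0x7f) | 0x70000 extracts a
  // 7-bit thread count starting at the given bit offset. In effect:
  //
  //   count = (input >> shift) & 0x7f;                 // 0..64 needs 7 bits
  //   exec  = (count == 64) ? ~0ull : (1ull << count) - 1;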
  case AMDGPU::GET_GROUPSTATICSIZE: {
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
        .add(MI.getOperand(0))
        .addImm(MFI->getLDSSize());
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INDIRECT_SRC_V1:
  case AMDGPU::SI_INDIRECT_SRC_V2:
  case AMDGPU::SI_INDIRECT_SRC_V4:
  case AMDGPU::SI_INDIRECT_SRC_V8:
  case AMDGPU::SI_INDIRECT_SRC_V16:
    return emitIndirectSrc(MI, *BB, *getSubtarget());
  case AMDGPU::SI_INDIRECT_DST_V1:
  case AMDGPU::SI_INDIRECT_DST_V2:
  case AMDGPU::SI_INDIRECT_DST_V4:
  case AMDGPU::SI_INDIRECT_DST_V8:
  case AMDGPU::SI_INDIRECT_DST_V16:
    return emitIndirectDst(MI, *BB, *getSubtarget());
  case AMDGPU::SI_KILL:
    return splitKillBlock(MI, BB);
  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

    unsigned Dst = MI.getOperand(0).getReg();
    unsigned Src0 = MI.getOperand(1).getReg();
    unsigned Src1 = MI.getOperand(2).getReg();
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned SrcCond = MI.getOperand(3).getReg();

    unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(Src0, 0, AMDGPU::sub0)
      .addReg(Src1, 0, AMDGPU::sub0)
      .addReg(SrcCond);
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(Src0, 0, AMDGPU::sub1)
      .addReg(Src1, 0, AMDGPU::sub1)
      .addReg(SrcCond);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_BR_UNDEF: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    const DebugLoc &DL = MI.getDebugLoc();
    MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
                           .add(MI.getOperand(0));
    Br->getOperand(1).setIsUndef(true); // read undef SCC
    MI.eraseFromParent();
    return BB;
  }
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}
bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
  return true;
}
EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}
MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
  // TODO: Should i16 be used always if legal? For now it would force VALU
  // shifts.
  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
}
// Answering this is somewhat tricky and depends on the specific device, since
// different devices have different rates for fma or all f64 operations.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
// regardless of which device (although the number of cycles differs between
// devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full rate devices. Normally, we should prefer selecting v_mad_f32
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and since it is always full
// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
// however does not support denormals, so we do report fma as faster if we have
// a fast fma device and require denormals.
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
    // This is as fast on some subtargets. However, we always have full rate f32
    // mad available which returns the same result as the separate operations
    // which we should prefer over fma. We can't use this if we want to support
    // denormals, so only report this in these cases.
    return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
  case MVT::f64:
    return true;
  case MVT::f16:
    return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
  default:
    break;
  }

  return false;
}
//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//
SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::FP_ROUND:
    return lowerFP_ROUND(Op, DAG);
  case ISD::TRAP:
  case ISD::DEBUGTRAP:
    return lowerTRAP(Op, DAG);
  }
  return SDValue();
}
void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::INSERT_VECTOR_ELT: {
    if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    if (IID == Intrinsic::amdgcn_cvt_pkrtz) {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
                                Src0, Src1);
      Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
      return;
    }
    break;
  }
  case ISD::SELECT: {
    SDLoc SL(N);
    EVT VT = N->getValueType(0);
    EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
    SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
    SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));

    EVT SelectVT = NewVT;
    if (NewVT.bitsLT(MVT::i32)) {
      LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
      RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
      SelectVT = MVT::i32;
    }

    SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
                                    N->getOperand(0), LHS, RHS);

    if (NewVT != SelectVT)
      NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
    return;
  }
  default:
    break;
  }
}
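// A rough before/after for the SELECT case above (illustrative only):
//
//   f16 = select i1 %c, f16 %a, f16 %b
// becomes
//   i16 = bitcast %a ; i16 = bitcast %b
//   i32 = any_extend each ; i32 = select i1 %c, lhs, rhs
//   f16 = bitcast (i16 = truncate the i32 select)
//
// i.e. the select is performed in a legal integer type and bitcast back.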
/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {

  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {

    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}
unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case Intrinsic::amdgcn_if:
      return AMDGPUISD::IF;
    case Intrinsic::amdgcn_else:
      return AMDGPUISD::ELSE;
    case Intrinsic::amdgcn_loop:
      return AMDGPUISD::LOOP;
    case Intrinsic::amdgcn_end_cf:
      llvm_unreachable("should not occur");
    default:
      return 0;
    }
  }

  // break, if_break, else_break are all only used as inputs to loop, not
  // directly as branch conditions.
  return 0;
}
void SITargetLowering::createDebuggerPrologueStackObjects(
    MachineFunction &MF) const {
  // Create stack objects that are used for emitting debugger prologue.
  //
  // Debugger prologue writes work group IDs and work item IDs to scratch memory
  // at fixed location in the following format:
  //   offset 0:  work group ID x
  //   offset 4:  work group ID y
  //   offset 8:  work group ID z
  //   offset 16: work item ID x
  //   offset 20: work item ID y
  //   offset 24: work item ID z
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  int ObjectIdx = 0;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Create fixed stack object for work group ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
    Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
    // Create fixed stack object for work item ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
    Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
  }
}
bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
  const Triple &TT = getTargetMachine().getTargetTriple();
  return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS &&
         AMDGPU::shouldEmitConstantsToTextSection(TT);
}

bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
  return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
         !shouldEmitFixup(GV) &&
         !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}

bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}
/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter, and also switches the branch target with BR if the
/// need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();
  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  // FIXME: This changes the types of the intrinsics instead of introducing new
  // nodes with the correct types.
  // e.g. llvm.amdgcn.loop

  // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>

  unsigned CFNode = isCFIntrinsic(Intr);
  if (CFNode == 0) {
    // This is a uniform branch so we don't need to legalize.
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
        (SetCC->getConstantOperandVal(1) == 1 &&
         cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
                              ISD::SETNE));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
    BR = NewBR.getNode();
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
      Chain, DL,
      CopyToReg->getOperand(1),
      SDValue(Result, i - 1),
      SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
    SDValue(Intr, Intr->getNumValues() - 1),
    Intr->getOperand(0));

  return Chain;
}
SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
                                            SDValue Op,
                                            const SDLoc &DL,
                                            EVT VT) const {
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
}
SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f16 &&
         "Do not know how to custom lower FP_ROUND for non-f16 type");

  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::f64)
    return Op;

  SDLoc DL(Op);

  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}
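// In IR terms this implements (a sketch, not from the original):
//
//   fptrunc double %x to half
//     ==> bitcast i16 (truncate i32 (fp_to_fp16 double %x)) to half
//
// using ISD::FP_TO_FP16, which yields the f16 bit pattern in an i32, rather
// than expanding through an intermediate f32 round.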
SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);

  unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ?
    SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap;

  if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
      Subtarget->isTrapHandlerEnabled()) {
    SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    unsigned UserSGPR = Info->getQueuePtrUserSGPR();
    assert(UserSGPR != AMDGPU::NoRegister);

    SDValue QueuePtr = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

    SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);

    SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
                                     QueuePtr, SDValue());

    SDValue Ops[] = {
      ToReg,
      DAG.getTargetConstant(TrapID, SL, MVT::i16),
      SGPR01,
      ToReg.getValue(1)
    };

    return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
  }

  switch (TrapID) {
  case SISubtarget::TrapIDLLVMTrap:
    return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
  case SISubtarget::TrapIDLLVMDebugTrap: {
    DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
                                     "debugtrap handler not supported",
                                     Op.getDebugLoc(),
                                     DS_Warning);
    LLVMContext &Ctx = MF.getFunction()->getContext();
    Ctx.diagnose(NoTrap);
    return Chain;
  }
  default:
    llvm_unreachable("unsupported trap handler type!");
  }

  return Chain;
}
SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
                                             SelectionDAG &DAG) const {
  // FIXME: Use inline constants (src_{shared, private}_base) instead.
  if (Subtarget->hasApertureRegs()) {
    unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
    SDValue ApertureReg = SDValue(
        DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
    SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
    return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);

  SDValue QueuePtr = CreateLiveInRegister(
    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;

  SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, QueuePtr,
                            DAG.getConstant(StructOffset, DL, MVT::i64));

  // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available and how do we get it?
  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
                                              AMDGPUASI.CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
                     MinAlign(64, StructOffset),
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}
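// Sketch of the fast path above (illustrative): the s_getreg_b32 immediate
// packs {hwreg id, bit offset, width-1} into 16 bits, so the sequence reads
// the aperture field out of the hardware register and then shifts it left by
// its width, reconstructing the high 32 bits of the 64-bit segment base
// address.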
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);

  const AMDGPUTargetMachine &TM =
    static_cast<const AMDGPUTargetMachine &>(getTargetMachine());

  // flat -> local/private
  if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned DestAS = ASC->getDestAddressSpace();

    if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
        DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(DestAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         NonNull, Ptr, SegmentNullPtr);
    }
  }

  // local/private -> flat
  if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned SrcAS = ASC->getSrcAddressSpace();

    if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
        SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(SrcAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);

      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
      SDValue CvtPtr
        = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
                         DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
                         FlatNullPtr);
    }
  }

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
    *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}
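// Net effect for the local/private -> flat direction (a sketch in pseudo-IR,
// not taken from this file):
//
//   %flat = addrspacecast i32 addrspace(3)* %p to i8*
// lowers to roughly
//   %wide = (aperture_hi << 32) | zext(%p)        ; built as a v2i32 vector
//   %flat = (%p == segment_null) ? flat_null : %wide
//
// so segment null pointers stay null after the cast.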
SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDValue Idx = Op.getOperand(2);
  if (isa<ConstantSDNode>(Idx))
    return SDValue();

  // Avoid stack access for dynamic indexing.
  SDLoc SL(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));

  // v_bfi_b32 (v_bfm_b32 16, bitidx), (val << bitidx), vec
  SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);

  // Convert the vector index to a bit-index; each element is 16 bits wide, so
  // the bit offset is Idx * 16 (i.e. Idx << 4).
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
                                  DAG.getConstant(4, SL, MVT::i32));

  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

  // Move the new element value into position before masking it in.
  SDValue ShiftedVal = DAG.getNode(ISD::SHL, SL, MVT::i32, ExtVal, ScaledIdx);

  SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
                            DAG.getConstant(0xffff, SL, MVT::i32),
                            ScaledIdx);

  SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, ShiftedVal);
  SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
                            DAG.getNOT(SL, BFM, MVT::i32), BCVec);

  SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
  return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
}
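// Worked example (illustrative, assuming a dynamic index whose value is 1):
//
//   ScaledIdx = 1 << 4  = 16        ; bit offset of element 1
//   BFM       = 0xffff << 16        ; mask for the high half
//   result    = (BFM & (val << 16)) | (~BFM & vec)
//
// which selects to a v_bfm_b32 / v_bfi_b32 pair instead of a scratch
// store/load round trip.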
SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc SL(Op);

  EVT ResultVT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);

  DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);

  // Make sure we do any optimizations that will make it easier to fold
  // source modifiers before obscuring it with bit operations.

  // XXX - Why doesn't this get called when vector_shuffle is expanded?
  if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
    return Combined;

  if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
    SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

    if (CIdx->getZExtValue() == 1) {
      Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
                           DAG.getConstant(16, SL, MVT::i32));
    } else {
      assert(CIdx->getZExtValue() == 0);
    }

    if (ResultVT.bitsLT(MVT::i32))
      Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);

    return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
  }

  // Convert the vector index to a bit-index; each element is 16 bits wide, so
  // the bit offset is Idx * 16 (i.e. Idx << 4).
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
                                  DAG.getConstant(4, SL, MVT::i32));

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
  SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);

  SDValue Result = Elt;
  if (ResultVT.bitsLT(MVT::i32))
    Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);

  return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
}
bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // We can fold offsets for anything that doesn't require a GOT relocation.
  return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
          GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
         !shouldEmitGOTReloc(GA->getGlobal());
}
static SDValue
buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
                        const SDLoc &DL, unsigned Offset, EVT PtrVT,
                        unsigned GAFlags = SIInstrInfo::MO_NONE) {
  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
  // lowered to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
  //   which is a 64-bit pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address.
  SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags);
  SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags == SIInstrInfo::MO_NONE ?
                                             GAFlags : GAFlags + 1);
  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}
SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);

  if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  SDLoc DL(GSD);
  const GlobalValue *GV = GSD->getGlobal();
  EVT PtrVT = Op.getValueType();

  if (shouldEmitFixup(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
  else if (shouldEmitPCReloc(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
                                   SIInstrInfo::MO_REL32);

  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
                                            SIInstrInfo::MO_GOTPCREL32);

  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  const DataLayout &DataLayout = DAG.getDataLayout();
  unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
  // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
                                   const SDLoc &DL, SDValue V) const {
  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
  // the destination register.
  //
  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
  // so we will end up with redundant moves to m0.
  //
  // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.
  //
  // A Null SDValue creates a glue result.
  SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
                                  V, Chain);
  return SDValue(M0, 0);
}
SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                                                 SDValue Op,
                                                 MVT VT,
                                                 unsigned Offset) const {
  SDLoc SL(Op);
  SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
                                           DAG.getEntryNode(), Offset, false);
  // The local size values will have the hi 16-bits as zero.
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}
static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                        EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "non-hsa intrinsic with hsa target",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                         EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "intrinsic not supported on subtarget",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}
SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_implicit_buffer_ptr: {
    if (getSubtarget()->isAmdCodeObjectV2(MF))
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    unsigned Reg = TRI->getPreloadedValue(MF,
                                          SIRegisterInfo::IMPLICIT_BUFFER_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdCodeObjectV2(MF)) {
      DiagnosticInfoUnsupported BadIntrin(
          *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
          DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
      SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR;
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
                                TRI->getPreloadedValue(MF, Reg), VT);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
    return lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), offset);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    unsigned Reg
      = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);

    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rcp_legacy:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_X, false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Y, false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Z, false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
  case Intrinsic::r600_read_global_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
  case Intrinsic::r600_read_global_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
  case Intrinsic::r600_read_local_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
  case Intrinsic::r600_read_tgid_x:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT);
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT);
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::r600_read_tidig_x:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT);
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT);
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT);
  case AMDGPUIntrinsic::SI_load_const: {
    SDValue Ops[] = {
      Op.getOperand(1),
      Op.getOperand(2)
    };

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case Intrinsic::amdgcn_fdiv_fast:
    return lowerFDIV_FAST(Op, DAG);
  case Intrinsic::amdgcn_interp_mov: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p1: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p2: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = SDValue(M0.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
                       Glue);
  }
  case Intrinsic::amdgcn_sin:
    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_cos:
    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_log_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return SDValue();

    DiagnosticInfoUnsupported BadIntrin(
      *MF.getFunction(), "intrinsic not supported on subtarget",
      DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
  }
  case Intrinsic::amdgcn_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::amdgcn_fract:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::amdgcn_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_scale: {
    // 3rd parameter required to be a constant.
    const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!Param)
      return DAG.getUNDEF(VT);

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.
    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }
  case Intrinsic::amdgcn_icmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
        CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
      return DAG.getUNDEF(VT);

    ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fcmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
        CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
      return DAG.getUNDEF(VT);

    FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
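  // Usage sketch for the two cases above (illustrative, not from the
  // original):
  //
  //   %m = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 32) ; 32 == eq
  //
  // lowers to AMDGPUISD::SETCC with SETEQ, which selects to a VOPC compare
  // producing a 64-bit mask with one bit per active lane.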
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_fmul_legacy:
    return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_sffbh:
    return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_sbfe:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_ubfe:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_cvt_pkrtz: {
    // FIXME: Stop adding the cast once v2f16 is legal.
    EVT VT = Op.getValueType();
    SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32,
                               Op.getOperand(1), Op.getOperand(2));
    return DAG.getNode(ISD::BITCAST, DL, VT, Node);
  }
  default:
    return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  }
}
SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);
  MachineFunction &MF = DAG.getMachineFunction();

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
      AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
    SDValue Ops[] = {
      M->getOperand(0), // Chain
      M->getOperand(2), // Ptr
      M->getOperand(3)  // Value
    };

    return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      Op.getOperand(4), // offset
      Op.getOperand(5), // glc
      Op.getOperand(6)  // slc
    };
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
      AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(MFI->getBufferPSV()),
      MachineMemOperand::MOLoad,
      VT.getStoreSize(), VT.getStoreSize());

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO);
  }
  case Intrinsic::amdgcn_tbuffer_load: {
    SDValue Ops[] = {
      Op.getOperand(0),  // Chain
      Op.getOperand(2),  // rsrc
      Op.getOperand(3),  // vindex
      Op.getOperand(4),  // voffset
      Op.getOperand(5),  // soffset
      Op.getOperand(6),  // offset
      Op.getOperand(7),  // dfmt
      Op.getOperand(8),  // nfmt
      Op.getOperand(9),  // glc
      Op.getOperand(10)  // slc
    };

    EVT VT = Op.getOperand(2).getValueType();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad,
      VT.getStoreSize(), VT.getStoreSize());
    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case Intrinsic::amdgcn_image_sample:
  case Intrinsic::amdgcn_image_sample_cl:
  case Intrinsic::amdgcn_image_sample_d:
  case Intrinsic::amdgcn_image_sample_d_cl:
  case Intrinsic::amdgcn_image_sample_l:
  case Intrinsic::amdgcn_image_sample_b:
  case Intrinsic::amdgcn_image_sample_b_cl:
  case Intrinsic::amdgcn_image_sample_lz:
  case Intrinsic::amdgcn_image_sample_cd:
  case Intrinsic::amdgcn_image_sample_cd_cl:

  // Sample with comparison.
  case Intrinsic::amdgcn_image_sample_c:
  case Intrinsic::amdgcn_image_sample_c_cl:
  case Intrinsic::amdgcn_image_sample_c_d:
  case Intrinsic::amdgcn_image_sample_c_d_cl:
  case Intrinsic::amdgcn_image_sample_c_l:
  case Intrinsic::amdgcn_image_sample_c_b:
  case Intrinsic::amdgcn_image_sample_c_b_cl:
  case Intrinsic::amdgcn_image_sample_c_lz:
  case Intrinsic::amdgcn_image_sample_c_cd:
  case Intrinsic::amdgcn_image_sample_c_cd_cl:

  // Sample with offsets.
  case Intrinsic::amdgcn_image_sample_o:
  case Intrinsic::amdgcn_image_sample_cl_o:
  case Intrinsic::amdgcn_image_sample_d_o:
  case Intrinsic::amdgcn_image_sample_d_cl_o:
  case Intrinsic::amdgcn_image_sample_l_o:
  case Intrinsic::amdgcn_image_sample_b_o:
  case Intrinsic::amdgcn_image_sample_b_cl_o:
  case Intrinsic::amdgcn_image_sample_lz_o:
  case Intrinsic::amdgcn_image_sample_cd_o:
  case Intrinsic::amdgcn_image_sample_cd_cl_o:

  // Sample with comparison and offsets.
  case Intrinsic::amdgcn_image_sample_c_o:
  case Intrinsic::amdgcn_image_sample_c_cl_o:
  case Intrinsic::amdgcn_image_sample_c_d_o:
  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
  case Intrinsic::amdgcn_image_sample_c_l_o:
  case Intrinsic::amdgcn_image_sample_c_b_o:
  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
  case Intrinsic::amdgcn_image_sample_c_lz_o:
  case Intrinsic::amdgcn_image_sample_c_cd_o:
  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
  case Intrinsic::amdgcn_image_getlod: {
    // Replace dmask with everything disabled with undef.
    const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
    if (!DMask || DMask->isNullValue()) {
      SDValue Undef = DAG.getUNDEF(Op.getValueType());
      return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
    }

    return SDValue();
  }
  default:
    return SDValue();
  }
}
SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  MachineFunction &MF = DAG.getMachineFunction();

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));

    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
      Op.getOperand(4), // src0
      Op.getOperand(5), // src1
      Op.getOperand(6), // src2
      Op.getOperand(7), // src3
      DAG.getTargetConstant(0, DL, MVT::i1),  // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)  // vm
    };

    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
    const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
    SDValue Src0 = Op.getOperand(4);
    SDValue Src1 = Op.getOperand(5);
    const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
    const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));

    SDValue Undef = DAG.getUNDEF(MVT::f32);
    const SDValue Ops[] = {
      Chain,
      DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
      DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8),  // en
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
      DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
      Undef, // src2
      Undef, // src3
      DAG.getTargetConstant(1, DL, MVT::i1),  // compr
      DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)  // vm
    };

    unsigned Opc = Done->isNullValue() ?
      AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
    return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
  }
  case Intrinsic::amdgcn_s_sendmsg:
  case Intrinsic::amdgcn_s_sendmsghalt: {
    unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
      AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
    Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
    SDValue Glue = Chain.getValue(1);
    return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
                       Op.getOperand(2), Glue);
  }
  case Intrinsic::amdgcn_init_exec: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
                       Op.getOperand(2));
  }
  case Intrinsic::amdgcn_init_exec_from_input: {
    return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
                       Op.getOperand(2), Op.getOperand(3));
  }
  case AMDGPUIntrinsic::AMDGPU_kill: {
    SDValue Src = Op.getOperand(2);
    if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
      if (!K->isNegative())
        return Chain;

      SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
      return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
    }

    SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
    return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
  }
  case Intrinsic::amdgcn_s_barrier: {
    if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
      const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
      unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
      if (WGSize <= ST.getWavefrontSize())
        return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
                                          Op.getOperand(0)), 0);
    }
    return SDValue();
  }
  case AMDGPUIntrinsic::SI_tbuffer_store: {
    // Extract vindex and voffset from vaddr as appropriate.
    const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
    const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
    SDValue VAddr = Op.getOperand(5);

    SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);

    assert(!(OffEn->isOne() && IdxEn->isOne()) &&
           "Legacy intrinsic doesn't support both offset and index - use new version");

    SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
    SDValue VOffset = OffEn->isOne() ? VAddr : Zero;

    // Deal with the vec-3 case.
    const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
    auto Opcode = NumChannels->getZExtValue() == 3 ?
      AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;

    SDValue Ops[] = {
      Chain,
      Op.getOperand(3),  // vdata
      Op.getOperand(2),  // rsrc
      VIndex,
      VOffset,
      Op.getOperand(6),  // soffset
      Op.getOperand(7),  // inst_offset
      Op.getOperand(8),  // dfmt
      Op.getOperand(9),  // nfmt
      Op.getOperand(12), // glc
      Op.getOperand(13), // slc
    };

    assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
           "Value of tfe other than zero is unsupported");

    EVT VT = Op.getOperand(3).getValueType();
    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOStore,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(Opcode, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case Intrinsic::amdgcn_tbuffer_store: {
    SDValue Ops[] = {
      Chain,
      Op.getOperand(2),  // vdata
      Op.getOperand(3),  // rsrc
      Op.getOperand(4),  // vindex
      Op.getOperand(5),  // voffset
      Op.getOperand(6),  // soffset
      Op.getOperand(7),  // offset
      Op.getOperand(8),  // dfmt
      Op.getOperand(9),  // nfmt
      Op.getOperand(10), // glc
      Op.getOperand(11)  // slc
    };
    EVT VT = Op.getOperand(3).getValueType();
    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOStore,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }

  default:
    return Op;
  }
}
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.
    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, RealMemVT, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }
  if (!MemVT.isVector())
    return SDValue();

  assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
         "Custom lowering for non-i32 vectors hasn't been implemented.");

  unsigned AS = Load->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                          AS, Load->getAlignment())) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
    return DAG.getMergeValues(Ops, DL);
  }
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUASI.FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
      AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
  unsigned NumElements = MemVT.getVectorNumElements();
  if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    if (isMemOpUniform(Load))
      return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
  }
  if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) {
    if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
        !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load))
      return SDValue();
    // Non-uniform loads will be selected to MUBUF instructions, so they
    // have the same legalization requirements as global and private
    // loads.
  }
  if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS ||
      AS == AMDGPUASI.FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorLoad(Op, DAG);
    // v4 loads are supported for private and global memory.
    return SDValue();
  }
  if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    // Depending on the setting of the private_element_size field in the
    // resource descriptor, we can only make private accesses up to a certain
    // size.
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorLoad(Load, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorLoad(Op, DAG);
      return SDValue();
    case 16:
      // Same as global/flat.
      if (NumElements > 4)
        return SplitVectorLoad(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    if (NumElements > 2)
      return SplitVectorLoad(Op, DAG);

    if (NumElements == 2)
      return SDValue();

    // If properly aligned, splitting might let us use ds_read_b64.
    return SplitVectorLoad(Op, DAG);
  }
  return SDValue();
}
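
// LowerSELECT below rewrites a 64-bit select as two 32-bit selects so it can
// map onto 32-bit conditional moves; roughly:
//   (i64 select cc, a, b)
//     -> lo = select cc, lo_32(a), lo_32(b)
//        hi = select cc, hi_32(a), hi_32(b)
//        bitcast (build_vector lo, hi) to i64
// For example, select(cc, 0x1111111122222222, 0x3333333344444444) becomes one
// select on 0x22222222/0x44444444 and another on 0x11111111/0x33333333.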
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() != MVT::i64)
    return SDValue();

  SDLoc DL(Op);
  SDValue Cond = Op.getOperand(0);

  SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
  SDValue One = DAG.getConstant(1, DL, MVT::i32);

  SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
  SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));

  SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
  SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);

  SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);

  SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
  SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);

  SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);

  SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
  return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
}
// Catch division cases where we can use shortcuts with rcp and rsq
// instructions.
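//
// A sketch of the algebra used below (all under unsafe-math assumptions):
//   1.0 / x       -> v_rcp_f32 x          (~1 ulp, no denormal support)
//   1.0 / sqrt(x) -> v_rsq_f32 x
//   -1.0 / x      -> v_rcp_f32 (fneg x)
//   a / b         -> a * v_rcp_f32(b)
// For example, with x = 4.0, rsq yields 1/sqrt(4.0) = 0.5 in a single
// instruction instead of a sqrt followed by a full-rate division.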
SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  EVT VT = Op.getValueType();
  const SDNodeFlags Flags = Op->getFlags();
  bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
                Flags.hasUnsafeAlgebra() || Flags.hasAllowReciprocal();

  if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
    return SDValue();

  if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
    if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
      if (CLHS->isExactlyValue(1.0)) {
        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation, they have a worst case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use it as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // 1.0 / sqrt(x) -> rsq(x)

        // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
        // error seems really high at 2^29 ULP.
        if (RHS.getOpcode() == ISD::FSQRT)
          return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));

        // 1.0 / x -> rcp(x)
        return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        // -1.0 / x -> rcp (fneg x)
        SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
        return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
      }
    }
  }

  if (Unsafe) {
    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
    return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
  }

  return SDValue();
}
static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
                          EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, A, B);
  }

  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMUL:
    Opcode = AMDGPUISD::FMUL_W_CHAIN;
    break;
  }

  return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
                     GlueChain.getValue(2));
}
static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
                           EVT VT, SDValue A, SDValue B, SDValue C,
                           SDValue GlueChain) {
  if (GlueChain->getNumValues() <= 1) {
    return DAG.getNode(Opcode, SL, VT, A, B, C);
  }

  assert(GlueChain->getNumValues() == 3);

  SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
  switch (Opcode) {
  default: llvm_unreachable("no chain equivalent for opcode");
  case ISD::FMA:
    Opcode = AMDGPUISD::FMA_W_CHAIN;
    break;
  }

  return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
                     GlueChain.getValue(2));
}
SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue Src0 = Op.getOperand(0);
  SDValue Src1 = Op.getOperand(1);

  SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
  SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);

  SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
  SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);

  SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
  SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
}
// Faster 2.5 ULP division that does not support denormals.
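//
// The K0/K1 magic numbers below implement a scaling trick: K0 = 0x6f800000
// is 0x1p+96f and K1 = 0x2f800000 is 0x1p-32f. When |den| > 2^96, rcp(den)
// would flush to zero as a denormal, so the denominator is first scaled down
// by 2^-32 and the quotient is rescaled by the same factor:
//   num / den == (num * rcp(den * 2^-32)) * 2^-32
// For ordinary magnitudes the selected scale is 1.0 and the expression
// reduces to num * rcp(den).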
SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);

  const APFloat K0Val(BitsToFloat(0x6f800000));
  const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);

  const APFloat K1Val(BitsToFloat(0x2f800000));
  const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  EVT SetCCVT =
    getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);

  SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);

  // TODO: Should this propagate fast-math-flags?
  r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);

  // rcp does not support denormals.
  SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);

  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);

  return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
}
SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
  if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
    return FastLowered;

  SDLoc SL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);

  SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);

  SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                          RHS, RHS, LHS);
  SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
                                        LHS, RHS, LHS);

  // Denominator is scaled to not be denormal, so using rcp is ok.
  SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
                                  DenominatorScaled);
  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
                                     DenominatorScaled);

  const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
                               (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
                               (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);

  const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);

  if (!Subtarget->hasFP32Denormals()) {
    SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
                                                      SL, MVT::i32);
    SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
                                       DAG.getEntryNode(),
                                       EnableDenormValue, BitField);

    SDValue Ops[3] = {
      NegDivScale0,
      EnableDenorm.getValue(0),
      EnableDenorm.getValue(1)
    };

    NegDivScale0 = DAG.getMergeValues(Ops, SL);
  }
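
  // The FMA chain below is, in effect, a Newton-Raphson refinement of the
  // hardware reciprocal approximation:
  //   e0 = 1 - den * rcp(den)          (Fma0)
  //   r1 = rcp(den) + e0 * rcp(den)    (Fma1, refined reciprocal)
  //   q  = num * r1                    (Mul)
  //   e1 = num - den * q               (Fma2, residual)
  //   q' = q + e1 * r1                 (Fma3)
  //   e2 = num - den * q'              (Fma4, final residual for div_fmas)
  // Each step roughly doubles the number of correct quotient bits.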
  SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
                             ApproxRcp, One, NegDivScale0);

  SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
                             ApproxRcp, Fma0);

  SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
                           Fma1, Fma1);

  SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
                             NumeratorScaled, Mul);

  SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);

  SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
                             NumeratorScaled, Fma3);

  if (!Subtarget->hasFP32Denormals()) {
    const SDValue DisableDenormValue =
        DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
    SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
                                        Fma4.getValue(1),
                                        DisableDenormValue,
                                        BitField,
                                        Fma4.getValue(2));

    SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                                      DisableDenorm, DAG.getRoot());
    DAG.setRoot(OutputChain);
  }

  SDValue Scale = NumeratorScaled.getValue(1);
  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
                             Fma4, Fma1, Fma3, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
}
SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getTarget().Options.UnsafeFPMath)
    return lowerFastUnsafeFDIV(Op, DAG);

  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);

  SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);

  SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);

  SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);

  SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);

  SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);

  SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);

  SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);

  SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);

  SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
                             NegDivScale0, Mul, DivScale1);
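
  // A sketch of the SI workaround that follows: rather than trusting the i1
  // result of div_scale, the code compares the exponent-carrying high 32 bits
  // of the inputs against those of the two div_scale results (hi(den) vs
  // hi(scale0), hi(num) vs hi(scale1)) and XORs the two compares to recover
  // the flag that div_fmas expects.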
  SDValue Scale;

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
    // Workaround a hardware bug on SI where the condition output from div_scale
    // is not usable.

    const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);

    // Figure out the scale to use for div_fmas.
    SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
    SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
    SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
    SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);

    SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
    SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);

    SDValue Scale0Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
    SDValue Scale1Hi
      = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);

    SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
    SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
    Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
  } else {
    Scale = DivScale1.getValue(1);
  }

  SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
                             Fma4, Fma3, Mul, Scale);

  return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
}
SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFDIV32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFDIV64(Op, DAG);

  if (VT == MVT::f16)
    return LowerFDIV16(Op, DAG);

  llvm_unreachable("Unexpected type for fdiv");
}
SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT VT = Store->getMemoryVT();

  if (VT == MVT::i1) {
    return DAG.getTruncStore(Store->getChain(), DL,
                             DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
                             Store->getBasePtr(), MVT::i1, Store->getMemOperand());
  }

  assert(VT.isVector() &&
         Store->getValue().getValueType().getScalarType() == MVT::i32);

  unsigned AS = Store->getAddressSpace();
  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                          AS, Store->getAlignment())) {
    return expandUnalignedStore(Store, DAG);
  }
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  // If there is a possibility that flat instructions access scratch memory
  // then we need to use the same legalization rules we use for private.
  if (AS == AMDGPUASI.FLAT_ADDRESS)
    AS = MFI->hasFlatScratchInit() ?
      AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;

  unsigned NumElements = VT.getVectorNumElements();
  if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
      AS == AMDGPUASI.FLAT_ADDRESS) {
    if (NumElements > 4)
      return SplitVectorStore(Op, DAG);
    return SDValue();
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    switch (Subtarget->getMaxPrivateElementSize()) {
    case 4:
      return scalarizeVectorStore(Store, DAG);
    case 8:
      if (NumElements > 2)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    case 16:
      if (NumElements > 4)
        return SplitVectorStore(Op, DAG);
      return SDValue();
    default:
      llvm_unreachable("unsupported private_element_size");
    }
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    if (NumElements > 2)
      return SplitVectorStore(Op, DAG);

    if (NumElements == 2)
      return SDValue();

    // If properly aligned, splitting might let us use ds_write_b64.
    return SplitVectorStore(Op, DAG);
  } else {
    llvm_unreachable("unhandled address space");
  }
}
SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  // TODO: Should this propagate fast-math-flags?
  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
                                  DAG.getNode(ISD::FMUL, DL, VT, Arg,
                                              DAG.getConstantFP(0.5/M_PI, DL,
                                                                VT)));
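
  // The hardware sin/cos units take a value in revolutions rather than
  // radians, so the argument is range-reduced as fract(x * 1/(2*pi)) before
  // being handed to COS_HW/SIN_HW. For example, x = 5*pi/2 maps to
  // fract(1.25) = 0.25 of a revolution, i.e. pi/2, giving sin = 1.0.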
  switch (Op.getOpcode()) {
  case ISD::FCOS:
    return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
  case ISD::FSIN:
    return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
  default:
    llvm_unreachable("Wrong trig opcode");
  }
}
SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
  AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
  assert(AtomicNode->isCompareAndSwap());
  unsigned AS = AtomicNode->getAddressSpace();

  // No custom lowering required for local address space.
  if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
    return Op;

  // Non-local address space requires custom lowering for atomic compare
  // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
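  // packing. In effect an i32 cmpxchg fuses its two data operands into one
  // v2i32 value holding { new, old }, matching the data/compare register
  // pairing the hardware cmpswap instructions consume.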
  SDLoc DL(Op);
  SDValue ChainIn = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  SDValue Old = Op.getOperand(2);
  SDValue New = Op.getOperand(3);
  EVT VT = Op.getValueType();
  MVT SimpleVT = VT.getSimpleVT();
  MVT VecType = MVT::getVectorVT(SimpleVT, 2);

  SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
  SDValue Ops[] = { ChainIn, Addr, NewOld };

  return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
                                 Ops, VT, AtomicNode->getMemOperand());
}
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT ScalarVT = VT.getScalarType();
  if (ScalarVT != MVT::f32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src.getValueType();

  // TODO: We could try to match extracting the higher bytes, which would be
  // easier if i8 vectors weren't promoted to i32 vectors, particularly after
  // types are legalized. v4i8 -> v4f32 is probably the only case to worry
  // about in practice.
  if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
      DCI.AddToWorklist(Cvt.getNode());
      return Cvt;
    }
  }

  return SDValue();
}
/// \brief Return true if the given offset Size in bytes can be folded into
/// the immediate offsets of a memory instruction for the given address space.
static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
                          const SISubtarget &STI) {
  auto AMDGPUASI = STI.getAMDGPUAS();
  if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
    // MUBUF instructions have a 12-bit offset in bytes.
    return isUInt<12>(OffsetSize);
  }
  if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // SMRD instructions have an 8-bit offset in dwords on SI and
    // a 20-bit offset in bytes on VI.
    if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return isUInt<20>(OffsetSize);

    return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
  }
  if (AS == AMDGPUASI.LOCAL_ADDRESS ||
      AS == AMDGPUASI.REGION_ADDRESS) {
    // The single offset versions have a 16-bit offset in bytes.
    return isUInt<16>(OffsetSize);
  }
  // Indirect register addressing does not use any offsets.
  return false;
}
// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
//
// This is a variant of
// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
//
// The normal DAG combiner will do this, but only if the add has one use since
// that would increase the number of instructions.
//
// This prevents us from seeing a constant offset that can be folded into a
// memory instruction's addressing mode. If we know the resulting add offset of
// a pointer can be folded into an addressing offset, we can replace the pointer
// operand with the add of the new constant offset. This eliminates one of the
// uses, and may allow the remaining use to also be simplified.
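//
// One illustrative case: for an LDS access whose address is computed as
//   ptr = (base + 16) << 2
// the combine rewrites it as (base << 2) + 64, and the +64 can then be
// folded into the ds_read/ds_write immediate offset field instead of
// requiring a separate add.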
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
                                               unsigned AddrSpace,
                                               DAGCombinerInfo &DCI) const {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (N0.getOpcode() != ISD::ADD)
    return SDValue();

  const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
  if (!CN1)
    return SDValue();

  const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!CAdd)
    return SDValue();

  // If the resulting offset is too large, we can't fold it into the addressing
  // mode offset.
  APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
  if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget()))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
  SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);

  return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
}
SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SDValue Ptr = N->getBasePtr();
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  // TODO: We could also do this for multiplies.
  unsigned AS = N->getAddressSpace();
  if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUASI.PRIVATE_ADDRESS) {
    SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
    if (NewPtr) {
      SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());

      NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
      return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
    }
  }

  return SDValue();
}
static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
  return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
         (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
         (Opc == ISD::XOR && Val == 0);
}

// Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
// integer combine opportunities since most 64-bit operations are decomposed
// this way. TODO: We won't want this for SALU especially if it is an inline
// immediate.
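//
// For example, and x, 0xffffffff00000042 splits into
//   lo = and lo_32(x), 0x42
//   hi = hi_32(x)       ; the and with all-ones is dropped entirely
// so only a single 32-bit and remains after the split.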
SDValue SITargetLowering::splitBinaryBitConstantOp(
  DAGCombinerInfo &DCI,
  const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  const ConstantSDNode *CRHS) const {
  uint64_t Val = CRHS->getZExtValue();
  uint32_t ValLo = Lo_32(Val);
  uint32_t ValHi = Hi_32(Val);
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
       bitOpWithConstantIsReducible(Opc, ValHi)) ||
      (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
    // If we need to materialize a 64-bit immediate, it will be split up later
    // anyway. Avoid creating the harder to understand 64-bit immediate
    // materialization.
    return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
  }

  return SDValue();
}
// Returns true if argument is a boolean value which is not serialized into
// memory or argument and does not require v_cndmask_b32 to be deserialized.
static bool isBoolSGPR(SDValue V) {
  if (V.getValueType() != MVT::i1)
    return false;
  switch (V.getOpcode()) {
  default: break;
  case ISD::SETCC:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case AMDGPUISD::FP_CLASS:
    return true;
  }
  return false;
}
SDValue SITargetLowering::performAndCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  if (DCI.isBeforeLegalize())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (VT == MVT::i64 && CRHS) {
    if (SDValue Split
        = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
      return Split;
  }
  if (CRHS && VT == MVT::i32) {
    // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
    // nb = number of trailing zeroes in mask
    // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
    // given that we are selecting 8 or 16 bit fields starting at byte boundary.
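    //
    // Worked example: and (srl x, 8), 0xff00 has mask = 0xff00, so nb = 8 and
    // it becomes shl (bfe x, 16, 8), 8 -- extract the 8-bit field starting at
    // bit 8 + 8 = 16, then shift it back into position.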
    uint64_t Mask = CRHS->getZExtValue();
    unsigned Bits = countPopulation(Mask);
    if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
        (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
      if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
        unsigned Shift = CShift->getZExtValue();
        unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
        unsigned Offset = NB + Shift;
        if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
          SDLoc SL(N);
          SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                    LHS->getOperand(0),
                                    DAG.getConstant(Offset, SL, MVT::i32),
                                    DAG.getConstant(Bits, SL, MVT::i32));
          EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
          SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
                                    DAG.getValueType(NarrowVT));
          SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
                                    DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
          return Shl;
        }
      }
    }
  }
  // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
  // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
  if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
    ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
    ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();

    SDValue X = LHS.getOperand(0);
    SDValue Y = RHS.getOperand(0);
    if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
      return SDValue();

    if (LCC == ISD::SETO) {
      if (X != LHS.getOperand(1))
        return SDValue();

      if (RCC == ISD::SETUNE) {
        const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
        if (!C1 || !C1->isInfinity() || C1->isNegative())
          return SDValue();

        const uint32_t Mask = SIInstrFlags::N_NORMAL |
                              SIInstrFlags::N_SUBNORMAL |
                              SIInstrFlags::N_ZERO |
                              SIInstrFlags::P_ZERO |
                              SIInstrFlags::P_SUBNORMAL |
                              SIInstrFlags::P_NORMAL;

        static_assert(((~(SIInstrFlags::S_NAN |
                          SIInstrFlags::Q_NAN |
                          SIInstrFlags::N_INFINITY |
                          SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
                      "mask not equal");

        SDLoc DL(N);
        return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                           X, DAG.getConstant(Mask, DL, MVT::i32));
      }
    }
  }
  if (VT == MVT::i32 &&
      (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
    // and x, (sext cc from i1) => select cc, x, 0
    if (RHS.getOpcode() != ISD::SIGN_EXTEND)
      std::swap(LHS, RHS);
    if (isBoolSGPR(RHS.getOperand(0)))
      return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
                           LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
  }

  return SDValue();
}
SDValue SITargetLowering::performOrCombine(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  EVT VT = N->getValueType(0);
  if (VT == MVT::i1) {
    // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
    if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
        RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
      SDValue Src = LHS.getOperand(0);
      if (Src != RHS.getOperand(0))
        return SDValue();

      const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
      const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
      if (!CLHS || !CRHS)
        return SDValue();

      // Only 10 bits are used.
      static const uint32_t MaxMask = 0x3ff;

      uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
      SDLoc DL(N);
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
                         Src, DAG.getConstant(NewMask, DL, MVT::i32));
    }

    return SDValue();
  }

  if (VT != MVT::i64)
    return SDValue();
  // TODO: This could be a generic combine with a predicate for extracting the
  // high half of an integer being free.

  // (or i64:x, (zero_extend i32:y)) ->
  //   i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
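  //
  // Intuition: the zero-extended operand contributes nothing to the high
  // 32 bits, so the or only needs to be computed on the low halves and the
  // high half of x passes through unchanged, turning a 64-bit or into a
  // single 32-bit one.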
  if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
      RHS.getOpcode() != ISD::ZERO_EXTEND)
    std::swap(LHS, RHS);

  if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
    SDValue ExtSrc = RHS.getOperand(0);
    EVT SrcVT = ExtSrc.getValueType();
    if (SrcVT == MVT::i32) {
      SDLoc SL(N);
      SDValue LowLHS, HiBits;
      std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
      SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);

      DCI.AddToWorklist(LowOr.getNode());
      DCI.AddToWorklist(HiBits.getNode());

      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                LowOr, HiBits);
      return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
    }
  }

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}
SDValue SITargetLowering::performXorCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  if (VT != MVT::i64)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (CRHS) {
    if (SDValue Split
          = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
      return Split;
  }

  return SDValue();
}
// Instructions that will be lowered with a final instruction that zeros the
// high result bits.
// XXX - probably only need to list legal operations.
static bool fp16SrcZerosHighBits(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FCANONICALIZE:
  case ISD::FP_ROUND:
  case ISD::UINT_TO_FP:
  case ISD::SINT_TO_FP:
  case ISD::FABS:
    // Fabs is lowered to a bit operation, but it's an and which will clear the
    // high bits anyway.
  case ISD::FSQRT:
  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FPOWI:
  case ISD::FPOW:
  case ISD::FLOG:
  case ISD::FLOG2:
  case ISD::FLOG10:
  case ISD::FEXP:
  case ISD::FEXP2:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case AMDGPUISD::FRACT:
  case AMDGPUISD::CLAMP:
  case AMDGPUISD::COS_HW:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMAD_FTZ:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::LDEXP:
    return true;
  default:
    // fcopysign, select and others may be lowered to 32-bit bit operations
    // which don't zero the high bits.
    return false;
  }
}
SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  if (!Subtarget->has16BitInsts() ||
      DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i32)
    return SDValue();

  SDValue Src = N->getOperand(0);
  if (Src.getValueType() != MVT::i16)
    return SDValue();

  // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
  // FIXME: It is not universally true that the high bits are zeroed on gfx9.
  if (Src.getOpcode() == ISD::BITCAST) {
    SDValue BCSrc = Src.getOperand(0);
    if (BCSrc.getValueType() == MVT::f16 &&
        fp16SrcZerosHighBits(BCSrc.getOpcode()))
      return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
  }

  return SDValue();
}
SDValue SITargetLowering::performClassCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Mask = N->getOperand(1);

  // fp_class x, 0 -> false
  if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
    if (CMask->isNullValue())
      return DAG.getConstant(0, SDLoc(N), MVT::i1);
  }

  if (N->getOperand(0).isUndef())
    return DAG.getUNDEF(MVT::i1);

  return SDValue();
}
static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
  if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
    return true;

  return DAG.isKnownNeverNaN(Op);
}
static bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
                            const SISubtarget *ST, unsigned MaxDepth=5) {
  // If source is a result of another standard FP operation it is already in
  // canonical form.

  switch (Op.getOpcode()) {
  default:
    break;

  // These will flush denorms if required.
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FCEIL:
  case ISD::FFLOOR:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FSQRT:
  case ISD::FDIV:
  case ISD::FCANONICALIZE:
    return true;

  case ISD::FP_ROUND:
    return Op.getValueType().getScalarType() != MVT::f16 ||
           ST->hasFP16Denormals();

  case ISD::FP_EXTEND:
    return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
           ST->hasFP16Denormals();

  case ISD::FP16_TO_FP:
  case ISD::FP_TO_FP16:
    return ST->hasFP16Denormals();

  // It can/will be lowered or combined as a bit operation.
  // Need to check their input recursively to handle.
  case ISD::FNEG:
  case ISD::FABS:
    return (MaxDepth > 0) &&
           isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1);

  case ISD::FSIN:
  case ISD::FCOS:
  case ISD::FSINCOS:
    return Op.getValueType().getScalarType() != MVT::f16;

  // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
  // For such targets need to check their input recursively.
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNAN:
  case ISD::FMAXNAN:
    if (ST->supportsMinMaxDenormModes() &&
        DAG.isKnownNeverNaN(Op.getOperand(0)) &&
        DAG.isKnownNeverNaN(Op.getOperand(1)))
      return true;

    return (MaxDepth > 0) &&
           isCanonicalized(DAG, Op.getOperand(0), ST, MaxDepth - 1) &&
           isCanonicalized(DAG, Op.getOperand(1), ST, MaxDepth - 1);

  case ISD::ConstantFP: {
    auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
    return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
  }
  }
  return false;
}
// Constant fold canonicalize.
SDValue SITargetLowering::performFCanonicalizeCombine(
  SDNode *N,
  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));

  if (!CFP) {
    SDValue N0 = N->getOperand(0);
    EVT VT = N0.getValueType().getScalarType();
    auto ST = getSubtarget();

    if (((VT == MVT::f32 && ST->hasFP32Denormals()) ||
         (VT == MVT::f64 && ST->hasFP64Denormals()) ||
         (VT == MVT::f16 && ST->hasFP16Denormals())) &&
        DAG.isKnownNeverNaN(N0))
      return N0;

    bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());

    if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
        isCanonicalized(DAG, N0, ST))
      return N0;

    return SDValue();
  }

  const APFloat &C = CFP->getValueAPF();

  // Flush denormals to 0 if not enabled.
  if (C.isDenormal()) {
    EVT VT = N->getValueType(0);
    EVT SVT = VT.getScalarType();
    if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);

    if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
      return DAG.getConstantFP(0.0, SDLoc(N), VT);
  }

  if (C.isNaN()) {
    EVT VT = N->getValueType(0);
    APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
    if (C.isSignaling()) {
      // Quiet a signaling NaN.
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
    }

    // Make sure it is the canonical NaN bitpattern.
    //
    // TODO: Can we use -1 as the canonical NaN value since it's an inline
    // immediate?
    if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
      return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
  }

  return N->getOperand(0);
}
static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return AMDGPUISD::FMAX3;
  case ISD::SMAX:
    return AMDGPUISD::SMAX3;
  case ISD::UMAX:
    return AMDGPUISD::UMAX3;
  case ISD::FMINNUM:
    return AMDGPUISD::FMIN3;
  case ISD::SMIN:
    return AMDGPUISD::SMIN3;
  case ISD::UMIN:
    return AMDGPUISD::UMIN3;
  default:
    llvm_unreachable("Not a min/max opcode");
  }
}
SDValue SITargetLowering::performIntMed3ImmCombine(
  SelectionDAG &DAG, const SDLoc &SL,
  SDValue Op0, SDValue Op1, bool Signed) const {
  ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  if (Signed) {
    if (K0->getAPIntValue().sge(K1->getAPIntValue()))
      return SDValue();
  } else {
    if (K0->getAPIntValue().uge(K1->getAPIntValue()))
      return SDValue();
  }

  EVT VT = K0->getValueType(0);
  unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
  if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
    return DAG.getNode(Med3Opc, SL, VT,
                       Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
  }

  // If there isn't a 16-bit med3 operation, convert to 32-bit.
  MVT NVT = MVT::i32;
  unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;

  SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
  SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
  SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);

  SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
  return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
}
SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
                                                  const SDLoc &SL,
                                                  SDValue Op0,
                                                  SDValue Op1) const {
  ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
  if (!K1)
    return SDValue();

  ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1));
  if (!K0)
    return SDValue();

  // Ordered >= (although NaN inputs should have folded away by now).
  APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
  if (Cmp == APFloat::cmpGreaterThan)
    return SDValue();

  // TODO: Check IEEE bit enabled?
  EVT VT = K0->getValueType(0);
  if (Subtarget->enableDX10Clamp()) {
    // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
    // hardware fmed3 behavior converting to a min.
    // FIXME: Should this be allowing -0.0?
    if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
  }

  // med3 for f16 is only available on gfx9+.
  if (VT == MVT::f64 || (VT == MVT::f16 && !Subtarget->hasMed3_16()))
    return SDValue();

  // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
  // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then
  // give the other result, which is different from med3 with a NaN input.
  SDValue Var = Op0.getOperand(0);
  if (!isKnownNeverSNan(DAG, Var))
    return SDValue();

  return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
                     Var, SDValue(K0, 0), SDValue(K1, 0));
}
SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Only do this if the inner op has one use since this will just increase
  // register pressure for no benefit.

  if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
      VT != MVT::f64 &&
      ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
    // max(max(a, b), c) -> max3(a, b, c)
    // min(min(a, b), c) -> min3(a, b, c)
    if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0.getOperand(0),
                         Op0.getOperand(1),
                         Op1);
    }

    // Try commuted.
    // max(a, max(b, c)) -> max3(a, b, c)
    // min(a, min(b, c)) -> min3(a, b, c)
    if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
      SDLoc DL(N);
      return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
                         DL,
                         N->getValueType(0),
                         Op0,
                         Op1.getOperand(0),
                         Op1.getOperand(1));
    }
  }
  // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
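  //
  // For example, min(max(x, 0), 255), with 0 < 255, becomes a single
  // v_med3_i32 x, 0, 255 -- the classic byte-clamp idiom.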
  if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
      return Med3;
  }

  if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
    if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
      return Med3;
  }

  // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
  if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
       (Opc == AMDGPUISD::FMIN_LEGACY &&
        Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
      (VT == MVT::f32 || VT == MVT::f64 ||
       (VT == MVT::f16 && Subtarget->has16BitInsts())) &&
      Op0.hasOneUse()) {
    if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
      return Res;
  }

  return SDValue();
}
static bool isClampZeroToOne(SDValue A, SDValue B) {
  if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
    if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
      // FIXME: Should this be allowing -0.0?
      return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
             (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
    }
  }

  return false;
}
// FIXME: Should only worry about snans for version with chain.
SDValue SITargetLowering::performFMed3Combine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
  // NaNs. With a NaN input, the order of the operands may change the result.

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  SDValue Src2 = N->getOperand(2);

  if (isClampZeroToOne(Src0, Src1)) {
    // const_a, const_b, x -> clamp is safe in all cases including signaling
    // nans.
    // FIXME: Should this be allowing -0.0?
    return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
  }

  // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
  // handling no dx10-clamp?
  if (Subtarget->enableDX10Clamp()) {
    // If NaNs are clamped to 0, we are free to reorder the inputs.

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
      std::swap(Src1, Src2);

    if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
      std::swap(Src0, Src1);

    if (isClampZeroToOne(Src1, Src2))
      return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
  }

  return SDValue();
}
SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SDValue Src0 = N->getOperand(0);
  SDValue Src1 = N->getOperand(1);
  if (Src0.isUndef() && Src1.isUndef())
    return DCI.DAG.getUNDEF(N->getValueType(0));

  return SDValue();
}
SDValue SITargetLowering::performExtractVectorEltCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SDValue Vec = N->getOperand(0);

  SelectionDAG &DAG = DCI.DAG;
  if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) {
    SDLoc SL(N);
    EVT EltVT = N->getValueType(0);
    SDValue Idx = N->getOperand(1);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Vec.getOperand(0), Idx);
    return DAG.getNode(ISD::FNEG, SL, EltVT, Elt);
  }

  return SDValue();
}
unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
                                          const SDNode *N0,
                                          const SDNode *N1) const {
  EVT VT = N0->getValueType(0);

  // Only do this if we are not trying to support denormals. v_mad_f32 does not
  // support denormals ever.
  if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
      (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
    return ISD::FMAD;

  const TargetOptions &Options = DAG.getTarget().Options;
  if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
       (N0->getFlags().hasUnsafeAlgebra() &&
        N1->getFlags().hasUnsafeAlgebra())) &&
      isFMAFasterThanFMulAndFAdd(VT)) {
    return ISD::FMA;
  }

  return 0;
}
SDValue SITargetLowering::performAddCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (VT != MVT::i32)
    return SDValue();

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // add x, zext (setcc) => addcarry x, 0, setcc
  // add x, sext (setcc) => subcarry x, 0, setcc
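  //
  // Intuition: zext(cc) is 0 or +1 and sext(cc) is 0 or -1, so adding the
  // extension is the same as feeding cc straight into the carry (or borrow)
  // input of a 32-bit addcarry/subcarry with a zero addend.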
  unsigned Opc = LHS.getOpcode();
  if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
      Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
    std::swap(RHS, LHS);

  Opc = RHS.getOpcode();
  switch (Opc) {
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ANY_EXTEND: {
    auto Cond = RHS.getOperand(0);
    if (!isBoolSGPR(Cond))
      break;
    SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
    SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
    Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
    return DAG.getNode(Opc, SL, VTList, Args);
  }
  case ISD::ADDCARRY: {
    // add x, (addcarry y, 0, cc) => addcarry x, y, cc
    auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
    if (!C || C->getZExtValue() != 0) break;
    SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
    return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
  }
  }
  return SDValue();
}
SDValue SITargetLowering::performSubCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  if (VT != MVT::i32)
    return SDValue();

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opc = LHS.getOpcode();
  if (Opc != ISD::SUBCARRY)
    std::swap(RHS, LHS);

  if (LHS.getOpcode() == ISD::SUBCARRY) {
    // sub (subcarry x, 0, cc), y => subcarry x, y, cc
    auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
    if (!C || C->getZExtValue() != 0)
      return SDValue();
    SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
    return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
  }
  return SDValue();
}
SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
                                                         DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C || C->getZExtValue() != 0)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue LHS = N->getOperand(0);

  // addcarry (add x, y), 0, cc => addcarry x, y, cc
  // subcarry (sub x, y), 0, cc => subcarry x, y, cc
  unsigned LHSOpc = LHS.getOpcode();
  unsigned Opc = N->getOpcode();
  if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
      (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
    SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
    return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
  }
  return SDValue();
}
SDValue SITargetLowering::performFAddCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  SDLoc SL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // These should really be instruction patterns, but writing patterns with
  // source modifiers is a pain.

  // fadd (fadd (a, a), b) -> mad 2.0, a, b
  if (LHS.getOpcode() == ISD::FADD) {
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
      }
    }
  }

  // fadd (b, fadd (a, a)) -> mad 2.0, a, b
  if (RHS.getOpcode() == ISD::FADD) {
    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
      }
    }
  }

  return SDValue();
}
SDValue SITargetLowering::performFSubCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  EVT VT = N->getValueType(0);
  assert(!VT.isVector());

  // Try to get the fneg to fold into the source modifier. This undoes generic
  // DAG combines and folds them into the mad.
  //
  // Only do this if we are not trying to support denormals. v_mad_f32 does
  // not support denormals ever.
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  if (LHS.getOpcode() == ISD::FADD) {
    // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
    SDValue A = LHS.getOperand(0);
    if (A == LHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
      if (FusedOp != 0) {
        const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
        SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

        return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
      }
    }
  }

  if (RHS.getOpcode() == ISD::FADD) {
    // (fsub c, (fadd a, a)) -> mad -2.0, a, c

    SDValue A = RHS.getOperand(0);
    if (A == RHS.getOperand(1)) {
      unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
      if (FusedOp != 0) {
        const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
        return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
      }
    }
  }

  return SDValue();
}
SDValue SITargetLowering::performSetCCCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = LHS.getValueType();
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();

  auto CRHS = dyn_cast<ConstantSDNode>(RHS);
  if (!CRHS) {
    CRHS = dyn_cast<ConstantSDNode>(LHS);
    if (CRHS) {
      std::swap(LHS, RHS);
      CC = getSetCCSwappedOperands(CC);
    }
  }

  if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
      isBoolSGPR(LHS.getOperand(0))) {
    // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
    // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
    // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
    // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
    if ((CRHS->isAllOnesValue() &&
         (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
        (CRHS->isNullValue() &&
         (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
      return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(-1, SL, MVT::i1));
    if ((CRHS->isAllOnesValue() &&
         (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
        (CRHS->isNullValue() &&
         (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
      return LHS.getOperand(0);
  }

  if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
                                           VT != MVT::f16))
    return SDValue();

  // Match isinf pattern
  // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
  if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
    const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
    if (!CRHS)
      return SDValue();

    const APFloat &APF = CRHS->getValueAPF();
    if (APF.isInfinity() && !APF.isNegative()) {
      unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
      return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
                         DAG.getConstant(Mask, SL, MVT::i32));
    }
  }

  return SDValue();
}
SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;

  SDValue Src = N->getOperand(0);
  SDValue Srl = N->getOperand(0);
  if (Srl.getOpcode() == ISD::ZERO_EXTEND)
    Srl = Srl.getOperand(0);

  // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
  if (Srl.getOpcode() == ISD::SRL) {
    // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
    // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
    // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
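    // More generally, (cvt_f32_ubyteN (srl x, 8*M)) selects byte N+M of x,
    // computed as SrcOffset below.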

    if (const ConstantSDNode *C =
            dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
                               EVT(MVT::i32));

      unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
      if (SrcOffset < 32 && SrcOffset % 8 == 0) {
        return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
                           MVT::f32, Srl);
      }
    }
  }

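  // Even when the shift doesn't fold, only one byte of Src is consumed, so
  // give the generic demanded-bits machinery a chance to simplify the rest.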
  APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);

  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
      TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::ADD:
    return performAddCombine(N, DCI);
  case ISD::SUB:
    return performSubCombine(N, DCI);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return performAddCarrySubCarryCombine(N, DCI);
  case ISD::FADD:
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
    return performFSubCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
        getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return performMinMaxCombine(N, DCI);
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;
    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case ISD::XOR:
    return performXorCombine(N, DCI);
  case ISD::ZERO_EXTEND:
    return performZeroExtendCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  case ISD::FCANONICALIZE:
    return performFCanonicalizeCombine(N, DCI);
  case AMDGPUISD::FRACT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::LDEXP: {
    SDValue Src = N->getOperand(0);
    if (Src.isUndef())
      return Src;
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return performUCharToFloatCombine(N, DCI);
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return performCvtF32UByteNCombine(N, DCI);
  case AMDGPUISD::FMED3:
    return performFMed3Combine(N, DCI);
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
    return performCvtPkRTZCombine(N, DCI);
  case ISD::SCALAR_TO_VECTOR: {
    SelectionDAG &DAG = DCI.DAG;
    EVT VT = N->getValueType(0);

    // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
    if (VT == MVT::v2i16 || VT == MVT::v2f16) {
      SDLoc SL(N);
      SDValue Src = N->getOperand(0);
      EVT EltVT = Src.getValueType();
      if (EltVT == MVT::f16)
        Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);

      SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
      return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
    }

    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    return performExtractVectorEltCombine(N, DCI);
  }
  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

/// \brief Helper function for adjustWritemask
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                       SelectionDAG &DAG) const {
  SDNode *Users[4] = { };
  unsigned Lane = 0;
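  // The position of the dmask operand differs between MIMG instruction
  // shapes; the operand-count check below distinguishes them.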
  unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Don't look at users of the chain.
    if (I.getUse().getResNo() != 0)
      continue;

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return;

    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      assert(Dmask);
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return;

  // Adjust the writemask in the node
  std::vector<SDValue> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
  Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);

  // If we only got one lane, replace it with a copy
  // (if NewDmask has only one bit set...)
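  // (x & (x - 1)) clears the lowest set bit, so the test below succeeds
  // exactly when a single bit is set.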
  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
    SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
                                       MVT::i32);
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                      SDLoc(), Users[Lane]->getValueType(0),
                                      SDValue(Node, 0), RC);
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }
}

static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                        SelectionDAG &DAG) const {
  if (Node->getOpcode() == ISD::CopyToReg) {
    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
    SDValue SrcVal = Node->getOperand(2);

    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to try understanding copies to physical registers.
    if (SrcVal.getValueType() == MVT::i1 &&
        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
      SDLoc SL(Node);
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue VReg = DAG.getRegister(
        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);

      SDNode *Glued = Node->getGluedNode();
      SDValue ToVReg
        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
                           SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
      SDValue ToResultReg
        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
                           VReg, ToVReg.getValue(1));
      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
      DAG.RemoveDeadNode(Node);
      return ToResultReg.getNode();
    }
  }

  SmallVector<SDValue, 8> Ops;
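  // Rewrite each frame index operand into an S_MOV_B32 of the frame index so
  // this instruction only sees register inputs.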
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                             Node->getOperand(i).getValueType(),
                                             Node->getOperand(i)), 0));
  }

  DAG.UpdateNodeOperands(Node, Ops);
  return Node;
}

/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode))
    adjustWritemask(Node, DAG);

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }

  return Node;
}

/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  if (TII->isMIMG(MI)) {
    unsigned VReg = MI.getOperand(0).getReg();
    const TargetRegisterClass *RC = MRI.getRegClass(VReg);
    // TODO: Need mapping tables to handle other cases (register classes).
    if (RC != &AMDGPU::VReg_128RegClass)
      return;

    unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4;
    unsigned Writemask = MI.getOperand(DmaskIdx).getImm();
    unsigned BitsSet = 0;
    for (unsigned i = 0; i < 4; ++i)
      BitsSet += Writemask & (1 << i) ? 1 : 0;
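    // BitsSet is the population count of the low four writemask bits; shrink
    // the result register class to match the number of enabled lanes.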
    switch (BitsSet) {
    default: return;
    case 1: RC = &AMDGPU::VGPR_32RegClass; break;
    case 2: RC = &AMDGPU::VReg_64RegClass; break;
    case 3: RC = &AMDGPU::VReg_96RegClass; break;
    }

    unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet);
    MI.setDesc(TII->get(NewOpcode));
    MRI.setRegClass(VReg, RC);
    return;
  }

  // Replace unused atomics with the no return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }

    return;
  }
}

static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint64_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
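  // The pointer occupies sub0_sub1 and the constant pair built above occupies
  // sub2_sub3 of the 128-bit descriptor.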
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to
/// the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
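  // Fold any RsrcDword1 bits (word 1 of the descriptor) into the high half
  // of the pointer.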
  if (RsrcDword1) {
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                       DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  if (!isTypeLegal(VT))
    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
      }

    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
      case 96:
        return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
      }
    }
  }

  if (Constraint.size() > 1) {
    const TargetRegisterClass *RC = nullptr;
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

// Figure out which registers should be reserved for stack access. Only after
// the function is legalized do we know all of the non-spill stack objects or if
// calls are present.
void SITargetLowering::finalizeLowering(MachineFunction &MF) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();

  if (Info->isEntryFunction()) {
    // Callable functions have fixed registers used for stack access.
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info);
  }

  // We have to assume the SP is needed in case there are calls in the function
  // during lowering. Calls are only detected after the function is
  // lowered. We're about to reserve registers, so don't bother using it if we
  // aren't really going to use it.
  bool NeedSP = !Info->isEntryFunction() ||
    MFI.hasVarSizedObjects() ||
    MFI.hasCalls();

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI->reservedStackPtrOffsetReg(MF);
    Info->setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    assert(Info->getStackPtrOffsetReg() != Info->getFrameOffsetReg());
    assert(!TRI->isSubRegister(Info->getScratchRSrcReg(),
                               Info->getStackPtrOffsetReg()));
    MRI.replaceRegWith(AMDGPU::SP_REG, Info->getStackPtrOffsetReg());
  }

  MRI.replaceRegWith(AMDGPU::PRIVATE_RSRC_REG, Info->getScratchRSrcReg());
  MRI.replaceRegWith(AMDGPU::FP_REG, Info->getFrameOffsetReg());
  MRI.replaceRegWith(AMDGPU::SCRATCH_WAVE_OFFSET_REG,
                     Info->getScratchWaveOffsetReg());

  TargetLoweringBase::finalizeLowering(MF);
}