//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//
#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#endif

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));

static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32_XM0RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
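
  // 16-bit values still live in full 32-bit registers; there is no narrower
  // register class.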
  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  if (Subtarget->hasVOP3PInsts()) {
    addRegisterClass(MVT::v2i16, &AMDGPU::SReg_32_XM0RegClass);
    addRegisterClass(MVT::v2f16, &AMDGPU::SReg_32_XM0RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector loads and stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);
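
  // There are no native instructions for truncating vector stores, so expand
  // them all.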
  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Expand);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v2f16, Custom);

  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::v2f16, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
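
  // 32-bit add/sub with a carry-out (and carry-in) are natively supported, so
  // mark the whole overflow/carry family legal.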
  setOperationAction(ISD::UADDO, MVT::i32, Legal);
  setOperationAction(ISD::USUBO, MVT::i32, Legal);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Legal);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Legal);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
        MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // TODO: For dynamic 64-bit vector inserts/extracts, should emit a pseudo
  // that is expanded to avoid having two separate loops in case the index is
  // a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // Avoid stack access for these.
  // TODO: Generalize to more vector types.
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling,
  // and output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value,
  // so let LLVM add the comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Legal);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::f16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::f16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Custom);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Custom);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
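    // v_mad_f16 does not support denormals, so FMAD is only usable when fp16
    // denormals are disabled.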
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  if (Subtarget->hasVOP3PInsts()) {
    for (MVT VT : {MVT::v2i16, MVT::v2f16}) {
      for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
        switch (Op) {
        case ISD::LOAD:
        case ISD::STORE:
        case ISD::BUILD_VECTOR:
        case ISD::BITCAST:
        case ISD::EXTRACT_VECTOR_ELT:
        case ISD::INSERT_VECTOR_ELT:
        case ISD::INSERT_SUBVECTOR:
        case ISD::EXTRACT_SUBVECTOR:
        case ISD::SCALAR_TO_VECTOR:
          break;
        case ISD::CONCAT_VECTORS:
          setOperationAction(Op, VT, Custom);
          break;
        default:
          setOperationAction(Op, VT, Expand);
          break;
        }
      }
    }

    // XXX - Do these do anything? Vector constants turn into build_vector.
    setOperationAction(ISD::Constant, MVT::v2i16, Legal);
    setOperationAction(ISD::ConstantFP, MVT::v2f16, Legal);

    setOperationAction(ISD::STORE, MVT::v2i16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::STORE, MVT::v2f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::LOAD, MVT::v2i16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::LOAD, MVT::v2f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::AND, MVT::v2i16, Promote);
    AddPromotedToType(ISD::AND, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::OR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::OR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::XOR, MVT::v2i16, Promote);
    AddPromotedToType(ISD::XOR, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2i16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2i16, MVT::i32);
    setOperationAction(ISD::SELECT, MVT::v2f16, Promote);
    AddPromotedToType(ISD::SELECT, MVT::v2f16, MVT::i32);

    setOperationAction(ISD::ADD, MVT::v2i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i16, Legal);
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);
    setOperationAction(ISD::SHL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRL, MVT::v2i16, Legal);
    setOperationAction(ISD::SRA, MVT::v2i16, Legal);
    setOperationAction(ISD::SMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v2i16, Legal);
    setOperationAction(ISD::SMAX, MVT::v2i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v2i16, Legal);

    setOperationAction(ISD::FADD, MVT::v2f16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f16, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f16, Legal);
    setOperationAction(ISD::FMA, MVT::v2f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::v2f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v2f16, Legal);

    // This isn't really legal, but this avoids the legalizer unrolling it (and
    // allows matching fneg (fabs x) patterns).
    setOperationAction(ISD::FABS, MVT::v2f16, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);

    setOperationAction(ISD::ZERO_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v2i32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);

    setOperationAction(ISD::SELECT, MVT::v2i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v2f16, Custom);
  }

  for (MVT VT : { MVT::v4i16, MVT::v4f16, MVT::v2i8, MVT::v4i8, MVT::v8i8 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
  }

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::ADDCARRY);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SUBCARRY);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);
  setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);

  // All memory operations. Some folding on the pointer operand is done to help
  // matching the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
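
  // Occupancy (waves per SIMD) is limited by register usage, so prefer
  // schedules that reduce register pressure.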
  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;

    const ConstantInt *Vol = dyn_cast<ConstantInt>(CI.getOperand(4));
    Info.vol = !Vol || !Vol->isZero();
    Info.readMem = true;
    Info.writeMem = true;
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
                                            SmallVectorImpl<Value*> &Ops,
                                            Type *&AccessTy) const {
  switch (II->getIntrinsicID()) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    Value *Ptr = II->getArgOperand(0);
    AccessTy = II->getType();
    Ops.push_back(Ptr);
    return true;
  }
  default:
    return false;
  }
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  if (!Subtarget->hasFlatInstOffsets()) {
    // Flat instructions do not have offsets, and only have the register
    // address.
    return AM.BaseOffs == 0 && AM.Scale == 0;
  }

  // GFX9 added a 13-bit signed offset. When using regular flat instructions,
  // the sign bit is ignored and the offset is treated as a 12-bit unsigned
  // value.
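  // For example, an offset of 4095 is encodable here, while targets without
  // flat instruction offsets (handled above) accept only a zero offset.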
  return isUInt<12>(AM.BaseOffs) && AM.Scale == 0;
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with offen bit set, so slightly
  // different than the normal addr64.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split immediate into soffset and immediate offset,
  // would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r,
    // or 2 * r + i as r + r + i.
    return true;
  default: // Don't allow n * r
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume that we will use FLAT for all global memory accesses
      // on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffers < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is 20-bit in bytes.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    return isLegalMUBUFAddressingMode(AM);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS ||
             AS == AMDGPUASI.REGION_ADDRESS) {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // offset.
    //
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  } else if (AS == AMDGPUASI.FLAT_ADDRESS ||
             AS == AMDGPUASI.UNKNOWN_ADDRESS_SPACE) {
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset, like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);
  }

  llvm_unreachable("unhandled address space");
}

bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                        const SelectionDAG &DAG) const {
  if (AS == AMDGPUASI.GLOBAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS) {
    return (MemVT.getSizeInBits() <= 4 * 32);
  } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
    unsigned MaxPrivateBits = 8 * getSubtarget()->getMaxPrivateElementSize();
    return (MemVT.getSizeInBits() <= MaxPrivateBits);
  } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
    return (MemVT.getSizeInBits() <= 2 * 32);
  }
  return true;
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUASI.LOCAL_ADDRESS ||
      AddrSpace == AMDGPUASI.REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUASI.PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUASI.FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword value must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type to
  // use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

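// Address spaces that can be lowered to flat-like addressing; a cast between
// any two of them is a no-op.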
static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
  return AS == AMDGPUASI.GLOBAL_ADDRESS ||
         AS == AMDGPUASI.FLAT_ADDRESS ||
         AS == AMDGPUASI.CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS, AMDGPUASI) &&
         isFlatGlobalAddrSpace(DestAS, AMDGPUASI);
}

bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();
  const Instruction *I = dyn_cast_or_null<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.noclobber");
}

bool SITargetLowering::isCheapAddrSpaceCast(unsigned SrcAS,
                                            unsigned DestAS) const {
  // Flat -> private/local is a simple truncate.
  // Flat -> global is a no-op.
  if (SrcAS == AMDGPUASI.FLAT_ADDRESS)
    return true;

  return isNoopAddrSpaceCast(SrcAS, DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);

  return AMDGPU::isUniformMMO(MemNode->getMemOperand());
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  if (Subtarget->has16BitInsts() && VT == MVT::i16) {
    switch (Op) {
    case ISD::LOAD:
    case ISD::STORE:

    // These operations are done with 32-bit instructions anyway.
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SELECT:
      // TODO: Extensions?
      return true;
    default:
      return false;
    }
  }

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   uint64_t Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  unsigned InputPtrReg = TRI->getPreloadedValue(MF,
                                                SIRegisterInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUASI.CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}

SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Val,
                                         bool Signed,
                                         const ISD::InputArg *Arg) const {
  if (Arg && (Arg->Flags.isSExt() || Arg->Flags.isZExt()) &&
      VT.bitsLT(MemVT)) {
    unsigned Opc = Arg->Flags.isZExt() ? ISD::AssertZext : ISD::AssertSext;
    Val = DAG.getNode(Opc, SL, MemVT, Val, DAG.getValueType(VT));
  }

  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Val, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Val, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Val, SL, VT);

  return Val;
}

SDValue SITargetLowering::lowerKernargMemParameter(
  SelectionDAG &DAG, EVT VT, EVT MemVT,
  const SDLoc &SL, SDValue Chain,
  uint64_t Offset, bool Signed,
  const ISD::InputArg *Arg) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  SDValue Ptr = lowerKernArgParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MONonTemporal |
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val = convertArgType(DAG, VT, MemVT, SL, Load, Signed, Arg);
  return DAG.getMergeValues({ Val, Load.getValue(1) }, SL);
}

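// Load an argument that was passed on the stack, for non-entry (callable)
// functions.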
SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                                              const SDLoc &SL, SDValue Chain,
                                              const ISD::InputArg &Arg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (Arg.Flags.isByVal()) {
    unsigned Size = Arg.Flags.getByValSize();
    int FrameIdx = MFI.CreateFixedObject(Size, VA.getLocMemOffset(), false);
    return DAG.getFrameIndex(FrameIdx, MVT::i32);
  }

  unsigned ArgOffset = VA.getLocMemOffset();
  unsigned ArgSize = VA.getValVT().getStoreSize();

  int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, true);

  // Create load nodes to retrieve arguments from the stack.
  SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
  SDValue ArgValue;

  // For NON_EXTLOAD, generic code in getLoad assert(ValVT == MemVT)
  ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
  MVT MemVT = VA.getValVT();

  switch (VA.getLocInfo()) {
  default:
    break;
  case CCValAssign::BCvt:
    MemVT = VA.getLocVT();
    break;
  case CCValAssign::SExt:
    ExtType = ISD::SEXTLOAD;
    break;
  case CCValAssign::ZExt:
    ExtType = ISD::ZEXTLOAD;
    break;
  case CCValAssign::AExt:
    ExtType = ISD::EXTLOAD;
    break;
  }

  ArgValue = DAG.getExtLoad(
    ExtType, SL, VA.getLocVT(), Chain, FIN,
    MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
    MemVT);
  return ArgValue;
}

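// For pixel shaders, skip unused inputs (while still tracking which PS inputs
// were allocated), and split vector arguments into their elements before
// calling convention analysis.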
static void processShaderInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
                                   CallingConv::ID CallConv,
                                   ArrayRef<ISD::InputArg> Ins,
                                   BitVector &Skipped,
                                   FunctionType *FType,
                                   SIMachineFunctionInfo *Info) {
  for (unsigned I = 0, E = Ins.size(), PSInputNum = 0; I != E; ++I) {
    const ISD::InputArg &Arg = Ins[I];

    // First, check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {

      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(I);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->markPSInputEnabled(PSInputNum);

      ++PSInputNum;
    }

    // Second, split vectors into their elements.
    if (Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned J = 0; J != NumElements; ++J) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }
    } else {
      Splits.push_back(Arg);
    }
  }
}

// Allocate special inputs passed in VGPRs.
static void allocateSpecialInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) {
  if (Info.hasWorkItemIDX()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkItemIDY()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkItemIDZ()) {
    unsigned Reg = TRI.getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  if (Info.hasImplicitBufferPtr()) {
    unsigned ImplicitBufferPtrReg = Info.addImplicitBufferPtr(TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    unsigned QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info.addKernargSegmentPtr(TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    unsigned DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.
}

// Allocate special input registers that are initialized per-wave.
static void allocateSystemSGPRs(CCState &CCInfo,
                                MachineFunction &MF,
                                SIMachineFunctionInfo &Info,
                                CallingConv::ID CallConv,
                                bool IsShader) {
  if (Info.hasWorkGroupIDX()) {
    unsigned Reg = Info.addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDY()) {
    unsigned Reg = Info.addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupIDZ()) {
    unsigned Reg = Info.addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasWorkGroupInfo()) {
    unsigned Reg = Info.addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32_XM0RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info.hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (IsShader) {
      PrivateSegmentWaveByteOffsetReg =
        Info.getPrivateSegmentWaveByteOffsetSystemSGPR();

      // This is true if the scratch wave byte offset doesn't have a fixed
      // location.
      if (PrivateSegmentWaveByteOffsetReg == AMDGPU::NoRegister) {
        PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
        Info.setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
      }
    } else
      PrivateSegmentWaveByteOffsetReg = Info.addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }
}

static void reservePrivateMemoryRegs(const TargetMachine &TM,
                                     MachineFunction &MF,
                                     const SIRegisterInfo &TRI,
                                     SIMachineFunctionInfo &Info,
                                     bool NeedSP) {
  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasStackObjects = MFI.hasStackObjects();

  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info.setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (TM.getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.isAmdCodeObjectV2(MF)) {
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.

      unsigned PrivateSegmentBufferReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info.setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI.reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue copies will be inserted from the argument
      // to these reserved registers.
      Info.setScratchRSrcReg(ReservedBufferReg);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI.reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info.setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects) {
      unsigned ScratchWaveOffsetReg = TRI.getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info.setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI.reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info.setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }

  if (NeedSP) {
    unsigned ReservedStackPtrOffsetReg = TRI.reservedStackPtrOffsetReg(MF);
    Info.setStackPtrOffsetReg(ReservedStackPtrOffsetReg);

    assert(Info.getStackPtrOffsetReg() != Info.getFrameOffsetReg());
    assert(!TRI.isSubRegister(Info.getScratchRSrcReg(),
                              Info.getStackPtrOffsetReg()));
  }
}

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(
        *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting debugger prologue if
  // "amdgpu-debugger-emit-prologue" attribute was specified.
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  SmallVector<CCValAssign, 16> ArgLocs;
  BitVector Skipped(Ins.size());
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  bool IsShader = AMDGPU::isShader(CallConv);
  bool IsKernel = AMDGPU::isKernel(CallConv);
  bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CallConv);

  if (IsShader) {
    processShaderInputArgs(Splits, CallConv, Ins, Skipped, FType, Info);

    // At least one interpolation mode must be enabled or else the GPU will
    // hang.
    //
    // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
    // set PSInputAddr, the user wants to enable some bits after the compilation
    // based on run-time states. Since we can't know what the final PSInputEna
    // will look like, we shouldn't do anything here and the user should take
    // responsibility for the correct programming.
    //
    // Otherwise, the following restrictions apply:
    // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
    // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
    //   enabled too.
    if (CallConv == CallingConv::AMDGPU_PS &&
        ((Info->getPSInputAddr() & 0x7F) == 0 ||
         ((Info->getPSInputAddr() & 0xF) == 0 &&
          Info->isPSInputAllocated(11)))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    assert(!Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  } else if (IsKernel) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    Splits.append(Ins.begin(), Ins.end());
  }

  if (IsEntryFunc) {
    allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
    allocateHSAUserSGPRs(CCInfo, MF, *TRI, *Info);
  }

  if (IsKernel) {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  } else {
    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, isVarArg);
    CCInfo.AnalyzeFormalArguments(Splits, AssignFn);
  }

  SmallVector<SDValue, 16> Chains;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (IsEntryFunc && VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();

      const uint64_t Offset = Subtarget->getExplicitKernelArgOffset(MF) +
                              VA.getLocMemOffset();
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());

      // The first 36 bytes of the input buffer contain information about
      // thread group and global sizes.
      SDValue Arg = lowerKernargMemParameter(
          DAG, VT, MemVT, DL, Chain, Offset, Ins[i].Flags.isSExt(), &Ins[i]);
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
          dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUASI.LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      continue;
    } else if (!IsEntryFunc && VA.isMemLoc()) {
      SDValue Val = lowerStackParameter(DAG, VA, DL, Chain, Arg);
      InVals.push_back(Val);
      if (!Arg.Flags.isByVal())
        Chains.push_back(Val.getValue(1));
      continue;
    }

    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (IsShader && Arg.VT.isVector()) {
      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();

  // TODO: Could maybe omit SP if only tail calls?
  bool NeedSP = FrameInfo.hasCalls() || FrameInfo.hasVarSizedObjects();

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    allocateSystemSGPRs(CCInfo, MF, *Info, CallConv, IsShader);
    reservePrivateMemoryRegs(getTargetMachine(), MF, *TRI, *Info, NeedSP);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    CCInfo.AllocateReg(Info->getScratchWaveOffsetReg());
    CCInfo.AllocateReg(Info->getFrameOffsetReg());

    if (NeedSP) {
      unsigned StackPtrReg = findFirstFreeSGPR(CCInfo);
      CCInfo.AllocateReg(StackPtrReg);
      Info->setStackPtrOffsetReg(StackPtrReg);
    }
  }

  return Chains.empty() ? Chain :
    DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

// TODO: If return values can't fit in registers, we should return as many as
// possible in registers before passing on stack.
bool SITargetLowering::CanLowerReturn(
    CallingConv::ID CallConv,
    MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {
  // Replacing returns with sret/stack usage doesn't make sense for shaders.
  // FIXME: Also sort of a workaround for custom vector splitting in LowerReturn
  // for shaders. Vector types should be explicitly handled by CC.
  if (AMDGPU::isEntryFunctionCC(CallConv))
    return true;

  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, IsVarArg));
}

SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (AMDGPU::isKernel(CallConv)) {
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);
  }

  bool IsShader = AMDGPU::isShader(CallConv);

  Info->setIfReturnsVoid(Outs.size() == 0);
  bool IsWaveEnd = Info->returnsVoid() && IsShader;

  SmallVector<ISD::OutputArg, 48> Splits;
  SmallVector<SDValue, 48> SplitVals;

  // Split vectors into their elements.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    const ISD::OutputArg &Out = Outs[i];

    if (IsShader && Out.VT.isVector()) {
      MVT VT = Out.VT.getVectorElementType();
      ISD::OutputArg NewOut = Out;
      NewOut.Flags.setSplit();
      NewOut.VT = VT;

      // We want the original number of vector elements here, e.g.
      // three or five, not four or eight.
      unsigned NumElements = Out.ArgVT.getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
                                   DAG.getConstant(j, DL, MVT::i32));
        SplitVals.push_back(Elem);
        Splits.push_back(NewOut);
        NewOut.PartOffset += NewOut.VT.getStoreSize();
      }
    } else {
      SplitVals.push_back(OutVals[i]);
      Splits.push_back(Out);
    }
  }

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Splits, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Add return address for callable functions.
  if (!Info->isEntryFunction()) {
    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
    SDValue ReturnAddrReg = CreateLiveInRegister(
        DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);

    // FIXME: Should be able to use a vreg here, but need a way to prevent it
    // from being allocated to a CSR.

    SDValue PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
                                                MVT::i64);

    Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, Flag);
    Flag = Chain.getValue(1);

    RetOps.push_back(PhysReturnAddrReg);
  }

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    // TODO: Partially return in registers if return values don't fit.

    SDValue Arg = SplitVals[realRVLocIdx];

    // Copied from other backends.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // FIXME: Does sret work properly?

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  unsigned Opc = AMDGPUISD::ENDPGM;
  if (!IsWaveEnd)
    Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

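// Resolve names used with named register variables / llvm.read_register
// (e.g. register unsigned Var asm("m0");) to physical registers, validating
// the requested type and subtarget.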
unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                             SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("m0", AMDGPU::M0)
    .Case("exec", AMDGPU::EXEC)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Default(AMDGPU::NoRegister);

  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));
  }

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
      Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
  }

  switch (Reg) {
  case AMDGPU::M0:
  case AMDGPU::EXEC_LO:
  case AMDGPU::EXEC_HI:
  case AMDGPU::FLAT_SCR_LO:
  case AMDGPU::FLAT_SCR_HI:
    if (VT.getSizeInBits() == 32)
      return Reg;
    break;
  case AMDGPU::EXEC:
  case AMDGPU::FLAT_SCR:
    if (VT.getSizeInBits() == 64)
      return Reg;
    break;
  default:
    llvm_unreachable("missing register type checking");
  }

  report_fatal_error(Twine("invalid type for register \""
                           + StringRef(RegName) + "\"."));
}

// If kill is not the last instruction, split the block so kill is always a
// proper terminator.
MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
                                                    MachineBasicBlock *BB) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineBasicBlock::iterator SplitPoint(&MI);
  ++SplitPoint;

  if (SplitPoint == BB->end()) {
    // Don't bother with a new block.
    MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineBasicBlock *SplitBB
    = MF->CreateMachineBasicBlock(BB->getBasicBlock());

  MF->insert(++MachineFunction::iterator(BB), SplitBB);
  SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());

  SplitBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(SplitBB);

  MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
  return SplitBB;
}

// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in
// the wavefront. If the value is uniform and just happens to be in a VGPR,
// this will only do one iteration. In the worst case, this will loop 64 times.
//
// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
  const SIInstrInfo *TII,
  MachineRegisterInfo &MRI,
  MachineBasicBlock &OrigBB,
  MachineBasicBlock &LoopBB,
  const DebugLoc &DL,
  const MachineOperand &IdxReg,
  unsigned InitReg,
  unsigned ResultReg,
  unsigned PhiReg,
  unsigned InitSaveExecReg,
  int Offset,
  bool UseGPRIdxMode) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
    .addReg(InitReg)
    .addMBB(&OrigBB)
    .addReg(ResultReg)
    .addMBB(&LoopBB);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
    .addReg(InitSaveExecReg)
    .addMBB(&OrigBB)
    .addReg(NewExec)
    .addMBB(&LoopBB);

  // Read the next variant <- also loop target.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Compare the just read M0 value to all possible Idx values.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
    .addReg(CurrentIdxReg)
    .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());

  if (UseGPRIdxMode) {
    unsigned IdxReg;
    if (Offset == 0) {
      IdxReg = CurrentIdxReg;
    } else {
      IdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), IdxReg)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }

    MachineInstr *SetIdx =
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX))
      .addReg(IdxReg, RegState::Kill);
    SetIdx->getOperand(2).setIsUndef();
  } else {
    // Move index from VCC into M0.
    if (Offset == 0) {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill);
    } else {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }
  }

  // Update EXEC, save the original EXEC value to VCC.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
    .addReg(CondReg, RegState::Kill);

  MRI.setSimpleHint(NewExec, CondReg);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  MachineInstr *InsertPt =
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(NewExec);

  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
  // s_cbranch_scc0?

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);

  return InsertPt->getIterator();
}

// This has slightly sub-optimal regalloc when the source vector is killed by
// the read. The register allocator does not understand that the kill is
// per-workitem, so the vector is kept alive for the whole loop and we end up
// not re-using a subregister from it, using 1 more VGPR than necessary. This
// was saved when this was expanded after register allocation.
static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
                                                  MachineBasicBlock &MBB,
                                                  MachineInstr &MI,
                                                  unsigned InitResultReg,
                                                  unsigned PhiReg,
                                                  int Offset,
                                                  bool UseGPRIdxMode) {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  MBB.addSuccessor(LoopBB);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
                                      InitResultReg, DstReg, PhiReg, TmpExec,
                                      Offset, UseGPRIdxMode);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(SaveExec);

  return InsPt;
}
// Returns subreg index, offset
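//
// For example, with a 128-bit (4 x 32-bit) register class, an offset of 2
// becomes (sub2, 0), while an out-of-range offset such as 7 is returned as a
// dynamic offset from sub0 instead of selecting a nonexistent subregister.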
static std::pair<unsigned, int>
computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
                            const TargetRegisterClass *SuperRC,
                            unsigned VecReg,
                            int Offset) {
  int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (Offset >= NumElts || Offset < 0)
    return std::make_pair(AMDGPU::sub0, Offset);

  return std::make_pair(AMDGPU::sub0 + Offset, 0);
}
// Return true if the index is an SGPR and was set.
static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 int Offset,
                                 bool UseGPRIdxMode,
                                 bool IsIndirectSrc) {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());

  assert(Idx->getReg() != AMDGPU::NoRegister);

  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
    return false;

  if (UseGPRIdxMode) {
    unsigned IdxMode = IsIndirectSrc ?
      VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
    if (Offset == 0) {
      MachineInstr *SetOn =
        BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
        .add(*Idx)
        .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    } else {
      unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
        .add(*Idx)
        .addImm(Offset);
      MachineInstr *SetOn =
        BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
        .addReg(Tmp, RegState::Kill)
        .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    }

    return true;
  }

  if (Offset == 0) {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .add(*Idx);
  } else {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .add(*Idx)
      .addImm(Offset);
  }

  return true;
}
// Control flow needs to be inserted if indexing with a VGPR.
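// An SGPR index is handled inline by setM0ToIndexFromSGPR; a VGPR index
// requires the waterfall loop built by loadM0FromVGPR above.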
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();

  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);

  unsigned SubReg;
  std::tie(SubReg, Offset)
    = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);

  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      // TODO: Look at the uses to avoid the copy. This may require rescheduling
      // to avoid interfering with other uses, so probably requires a new
      // optimization pass.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);

  if (UseGPRIdxMode) {
    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
                          .addImm(0) // Reset inside loop.
                          .addImm(VGPRIndexMode::SRC0_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg, Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
  }

  MI.eraseFromParent();

  return LoopBB;
}
static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
                                 const TargetRegisterClass *VecRC) {
  switch (TRI.getRegSizeInBits(*VecRC)) {
  case 32: // 4 bytes
    return AMDGPU::V_MOVRELD_B32_V1;
  case 64: // 8 bytes
    return AMDGPU::V_MOVRELD_B32_V2;
  case 128: // 16 bytes
    return AMDGPU::V_MOVRELD_B32_V4;
  case 256: // 32 bytes
    return AMDGPU::V_MOVRELD_B32_V8;
  case 512: // 64 bytes
    return AMDGPU::V_MOVRELD_B32_V16;
  default:
    llvm_unreachable("unsupported size for MOVRELD pseudos");
  }
}
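
// Insert-element counterpart of emitIndirectSrc: writes the selected lane of
// the vector, either through a V_MOVRELD pseudo or, in GPR index mode, a
// V_MOV_B32_indirect with the destination enable set.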
static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());

  // This can be an immediate, but will be folded later.
  assert(Val->getReg());

  unsigned SubReg;
  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
                                                         SrcVec->getReg(),
                                                         Offset);
  bool UseGPRIdxMode = ST.useVGPRIndexMode(EnableVGPRIndexMode);

  if (Idx->getReg() == AMDGPU::NoRegister) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    assert(Offset == 0);

    BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
      .add(*SrcVec)
      .add(*Val)
      .addImm(SubReg);

    MI.eraseFromParent();
    return &MBB;
  }

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
        .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
        .add(*Val)
        .addReg(Dst, RegState::ImplicitDefine)
        .addReg(SrcVec->getReg(), RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);

      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

      BuildMI(MBB, I, DL, MovRelDesc)
        .addReg(Dst, RegState::Define)
        .addReg(SrcVec->getReg())
        .add(*Val)
        .addImm(SubReg - AMDGPU::sub0);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  if (Val->isReg())
    MRI.clearKillFlags(Val->getReg());

  const DebugLoc &DL = MI.getDebugLoc();

  if (UseGPRIdxMode) {
    MachineBasicBlock::iterator I(&MI);

    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
                          .addImm(0) // Reset inside loop.
                          .addImm(VGPRIndexMode::DST_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  unsigned PhiReg = MRI.createVirtualRegister(VecRC);

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
                              Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
      .addReg(PhiReg, RegState::Undef, SubReg) // vdst
      .add(*Val)
      .addReg(Dst, RegState::ImplicitDefine)
      .addReg(PhiReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));

    BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
      .addReg(Dst, RegState::Define)
      .addReg(PhiReg)
      .add(*Val)
      .addImm(SubReg - AMDGPU::sub0);
  }

  MI.eraseFromParent();

  return LoopBB;
}
MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
  MachineInstr &MI, MachineBasicBlock *BB) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  MachineFunction *MF = BB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  if (TII->isMIMG(MI)) {
    if (!MI.memoperands_empty())
      return BB;
    // Add a memoperand for mimg instructions so that they aren't assumed to
    // be ordered memory instructions.

    MachinePointerInfo PtrInfo(MFI->getImagePSV());
    MachineMemOperand::Flags Flags = MachineMemOperand::MODereferenceable;
    if (MI.mayStore())
      Flags |= MachineMemOperand::MOStore;

    if (MI.mayLoad())
      Flags |= MachineMemOperand::MOLoad;

    auto MMO = MF->getMachineMemOperand(PtrInfo, Flags, 0, 0);
    MI.addMemOperand(*MF, MMO);
    return BB;
  }
  switch (MI.getOpcode()) {
  case AMDGPU::SI_INIT_M0:
    BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .add(MI.getOperand(0));
    MI.eraseFromParent();
    return BB;

  case AMDGPU::SI_INIT_EXEC:
    // This should be before all vector instructions.
    BuildMI(*BB, &*BB->begin(), MI.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
            AMDGPU::EXEC)
      .addImm(MI.getOperand(0).getImm());
    MI.eraseFromParent();
    return BB;
  case AMDGPU::SI_INIT_EXEC_FROM_INPUT: {
    // Extract the thread count from an SGPR input and set EXEC accordingly.
    // Since BFM can't shift by 64, handle that case with CMP + CMOV.
    //
    // S_BFE_U32 count, input, {shift, 7}
    // S_BFM_B64 exec, count, 0
    // S_CMP_EQ_U32 count, 64
    // S_CMOV_B64 exec, -1
    MachineInstr *FirstMI = &*BB->begin();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    unsigned InputReg = MI.getOperand(0).getReg();
    unsigned CountReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    bool Found = false;

    // Move the COPY of the input reg to the beginning, so that we can use it.
    for (auto I = BB->begin(); I != &MI; I++) {
      if (I->getOpcode() != TargetOpcode::COPY ||
          I->getOperand(0).getReg() != InputReg)
        continue;

      if (I == FirstMI) {
        FirstMI = &*++BB->begin();
      } else {
        I->removeFromParent();
        BB->insert(FirstMI, &*I);
      }
      Found = true;
      break;
    }
    assert(Found);
    (void)Found;

    // This should be before all vector instructions.
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFE_U32), CountReg)
      .addReg(InputReg)
      .addImm((MI.getOperand(1).getImm() & 0x7f) | 0x70000);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_BFM_B64),
            AMDGPU::EXEC)
      .addReg(CountReg)
      .addImm(0);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMP_EQ_U32))
      .addReg(CountReg, RegState::Kill)
      .addImm(64);
    BuildMI(*BB, FirstMI, DebugLoc(), TII->get(AMDGPU::S_CMOV_B64),
            AMDGPU::EXEC)
      .addImm(-1);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::GET_GROUPSTATICSIZE: {
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
      .add(MI.getOperand(0))
      .addImm(MFI->getLDSSize());
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INDIRECT_SRC_V1:
  case AMDGPU::SI_INDIRECT_SRC_V2:
  case AMDGPU::SI_INDIRECT_SRC_V4:
  case AMDGPU::SI_INDIRECT_SRC_V8:
  case AMDGPU::SI_INDIRECT_SRC_V16:
    return emitIndirectSrc(MI, *BB, *getSubtarget());
  case AMDGPU::SI_INDIRECT_DST_V1:
  case AMDGPU::SI_INDIRECT_DST_V2:
  case AMDGPU::SI_INDIRECT_DST_V4:
  case AMDGPU::SI_INDIRECT_DST_V8:
  case AMDGPU::SI_INDIRECT_DST_V16:
    return emitIndirectDst(MI, *BB, *getSubtarget());
  case AMDGPU::SI_KILL:
    return splitKillBlock(MI, BB);
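  // There is no 64-bit conditional move instruction; the pseudo below expands
  // into two 32-bit v_cndmask_b32 selects on the sub0/sub1 halves, reassembled
  // with a REG_SEQUENCE.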
  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

    unsigned Dst = MI.getOperand(0).getReg();
    unsigned Src0 = MI.getOperand(1).getReg();
    unsigned Src1 = MI.getOperand(2).getReg();
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned SrcCond = MI.getOperand(3).getReg();

    unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(Src0, 0, AMDGPU::sub0)
      .addReg(Src1, 0, AMDGPU::sub0)
      .addReg(SrcCond);
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(Src0, 0, AMDGPU::sub1)
      .addReg(Src1, 0, AMDGPU::sub1)
      .addReg(SrcCond);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_BR_UNDEF: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    const DebugLoc &DL = MI.getDebugLoc();
    MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
                           .add(MI.getOperand(0));
    Br->getOperand(1).setIsUndef(true); // read undef SCC
    MI.eraseFromParent();
    return BB;
  }
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}
bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
  return true;
}

EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}

MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const {
  // TODO: Should i16 be used always if legal? For now it would force VALU
  // shifts.
  return (VT == MVT::i16) ? MVT::i16 : MVT::i32;
}

// Answering this is somewhat tricky and depends on the specific device, since
// different devices have different rates for fma and for f64 operations in
// general.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
// regardless of which device (although the number of cycles differs between
// devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full rate devices. Normally, we should prefer selecting v_mad_f32
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and since it is always full
// rate. Therefore, we lie and report that it is not faster for f32. v_mad_f32
// however does not support denormals, so we do report fma as faster if we have
// a fast fma device and require denormals.
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
    // This is as fast on some subtargets. However, we always have full rate f32
    // mad available which returns the same result as the separate operations
    // which we should prefer over fma. We can't use this if we want to support
    // denormals, so only report this in these cases.
    return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
  case MVT::f64:
    return true;
  case MVT::f16:
    return Subtarget->has16BitInsts() && Subtarget->hasFP16Denormals();
  default:
    break;
  }

  return false;
}
//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//
SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::FP_ROUND:
    return lowerFP_ROUND(Op, DAG);

  case ISD::TRAP:
  case ISD::DEBUGTRAP:
    return lowerTRAP(Op, DAG);
  }
  return SDValue();
}
void SITargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::INSERT_VECTOR_ELT: {
    if (SDValue Res = lowerINSERT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::EXTRACT_VECTOR_ELT: {
    if (SDValue Res = lowerEXTRACT_VECTOR_ELT(SDValue(N, 0), DAG))
      Results.push_back(Res);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    if (IID == Intrinsic::amdgcn_cvt_pkrtz) {
      SDValue Src0 = N->getOperand(1);
      SDValue Src1 = N->getOperand(2);
      SDLoc SL(N);
      SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, SL, MVT::i32,
                                Src0, Src1);
      Results.push_back(DAG.getNode(ISD::BITCAST, SL, MVT::v2f16, Cvt));
      return;
    }
    break;
  }
  case ISD::SELECT: {
    SDLoc SL(N);
    EVT VT = N->getValueType(0);
    EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
    SDValue LHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(1));
    SDValue RHS = DAG.getNode(ISD::BITCAST, SL, NewVT, N->getOperand(2));

    EVT SelectVT = NewVT;
    if (NewVT.bitsLT(MVT::i32)) {
      LHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, LHS);
      RHS = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, RHS);
      SelectVT = MVT::i32;
    }

    SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, SelectVT,
                                    N->getOperand(0), LHS, RHS);

    if (NewVT != SelectVT)
      NewSelect = DAG.getNode(ISD::TRUNCATE, SL, NewVT, NewSelect);
    Results.push_back(DAG.getNode(ISD::BITCAST, SL, VT, NewSelect));
    return;
  }
  default:
    break;
  }
}
/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {
  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {
    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return nullptr;
}
unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case Intrinsic::amdgcn_if:
      return AMDGPUISD::IF;
    case Intrinsic::amdgcn_else:
      return AMDGPUISD::ELSE;
    case Intrinsic::amdgcn_loop:
      return AMDGPUISD::LOOP;
    case Intrinsic::amdgcn_end_cf:
      llvm_unreachable("should not occur");
    default:
      return 0;
    }
  }

  // break, if_break, else_break are all only used as inputs to loop, not
  // directly as branch conditions.
  return 0;
}
void SITargetLowering::createDebuggerPrologueStackObjects(
    MachineFunction &MF) const {
  // Create stack objects that are used for emitting debugger prologue.
  //
  // Debugger prologue writes work group IDs and work item IDs to scratch memory
  // at a fixed location in the following format:
  //   offset 0:  work group ID x
  //   offset 4:  work group ID y
  //   offset 8:  work group ID z
  //   offset 16: work item ID x
  //   offset 20: work item ID y
  //   offset 24: work item ID z
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  int ObjectIdx = 0;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Create fixed stack object for work group ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
    Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
    // Create fixed stack object for work item ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
    Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
  }
}
bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
  const Triple &TT = getTargetMachine().getTargetTriple();
  return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS &&
         AMDGPU::shouldEmitConstantsToTextSection(TT);
}

bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
  return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
         !shouldEmitFixup(GV) &&
         !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}

bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}
/// This transforms the control flow intrinsics to get the branch destination as
/// last parameter, also switches branch target with BR if the need arises
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();
  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  // FIXME: This changes the types of the intrinsics instead of introducing new
  // nodes with the correct types.
  // e.g. llvm.amdgcn.loop

  // eg: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>

  unsigned CFNode = isCFIntrinsic(Intr);
  if (CFNode == 0) {
    // This is a uniform branch so we don't need to legalize.
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
         (SetCC->getConstantOperandVal(1) == 1 &&
          cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
              ISD::SETNE));

  // operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 2 : 1), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // build the new intrinsic call
  SDNode *Result = DAG.getNode(CFNode, DL, DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
    BR = NewBR.getNode();
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
      Chain, DL,
      CopyToReg->getOperand(1),
      SDValue(Result, i - 1),
      SDValue()); // We're not using chain glue here.

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
    SDValue(Intr, Intr->getNumValues() - 1),
    Intr->getOperand(0));

  return Chain;
}
SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
                                            SDValue Op,
                                            const SDLoc &DL,
                                            EVT VT) const {
  // Note: ISD::FTRUNC is the round-toward-zero operation, not a type
  // conversion; narrowing to a smaller FP type must use ISD::FP_ROUND.
  return Op.getValueType().bitsLE(VT) ?
      DAG.getNode(ISD::FP_EXTEND, DL, VT, Op) :
      DAG.getNode(ISD::FP_ROUND, DL, VT, Op,
                  DAG.getTargetConstant(0, DL, MVT::i32));
}
SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f16 &&
         "Do not know how to custom lower FP_ROUND for non-f16 type");

  SDValue Src = Op.getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::f64)
    return Op;

  SDLoc DL(Op);

  SDValue FpToFp16 = DAG.getNode(ISD::FP_TO_FP16, DL, MVT::i32, Src);
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToFp16);
  return DAG.getNode(ISD::BITCAST, DL, MVT::f16, Trunc);
}
SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);

  unsigned TrapID = Op.getOpcode() == ISD::DEBUGTRAP ?
    SISubtarget::TrapIDLLVMDebugTrap : SISubtarget::TrapIDLLVMTrap;

  if (Subtarget->getTrapHandlerAbi() == SISubtarget::TrapHandlerAbiHsa &&
      Subtarget->isTrapHandlerEnabled()) {
    SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    unsigned UserSGPR = Info->getQueuePtrUserSGPR();
    assert(UserSGPR != AMDGPU::NoRegister);

    SDValue QueuePtr = CreateLiveInRegister(
      DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

    SDValue SGPR01 = DAG.getRegister(AMDGPU::SGPR0_SGPR1, MVT::i64);

    SDValue ToReg = DAG.getCopyToReg(Chain, SL, SGPR01,
                                     QueuePtr, SDValue());

    SDValue Ops[] = {
      ToReg,
      DAG.getTargetConstant(TrapID, SL, MVT::i16),
      SGPR01,
      ToReg.getValue(1)
    };

    return DAG.getNode(AMDGPUISD::TRAP, SL, MVT::Other, Ops);
  }

  switch (TrapID) {
  case SISubtarget::TrapIDLLVMTrap:
    return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
  case SISubtarget::TrapIDLLVMDebugTrap: {
    DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
                                     "debugtrap handler not supported",
                                     Op.getDebugLoc(),
                                     DS_Warning);
    LLVMContext &Ctx = MF.getFunction()->getContext();
    Ctx.diagnose(NoTrap);
    return Chain;
  }
  default:
    llvm_unreachable("unsupported trap handler type!");
  }

  return Chain;
}
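
// Returns the high 32 bits of flat addresses in the given segment. With
// aperture registers this is a single s_getreg_b32 of the SRC_SHARED_BASE /
// SRC_PRIVATE_BASE hardware register field, shifted back into position;
// otherwise it is loaded from the amd_queue_t structure addressed by the
// queue pointer user SGPR.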
SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
                                             SelectionDAG &DAG) const {
  // FIXME: Use inline constants (src_{shared, private}_base) instead.
  if (Subtarget->hasApertureRegs()) {
    unsigned Offset = AS == AMDGPUASI.LOCAL_ADDRESS ?
      AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
      AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUASI.LOCAL_ADDRESS ?
      AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
      AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
      AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
      Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
      WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    SDValue EncodingImm = DAG.getTargetConstant(Encoding, DL, MVT::i16);
    SDValue ApertureReg = SDValue(
      DAG.getMachineNode(AMDGPU::S_GETREG_B32, DL, MVT::i32, EncodingImm), 0);
    SDValue ShiftAmount = DAG.getTargetConstant(WidthM1 + 1, DL, MVT::i32);
    return DAG.getNode(ISD::SHL, DL, MVT::i32, ApertureReg, ShiftAmount);
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);

  SDValue QueuePtr = CreateLiveInRegister(
    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUASI.LOCAL_ADDRESS) ? 0x40 : 0x44;

  SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, QueuePtr,
                            DAG.getConstant(StructOffset, DL, MVT::i64));

  // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available and how do we get it?
  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
                                              AMDGPUASI.CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  return DAG.getLoad(MVT::i32, DL, QueuePtr.getValue(1), Ptr, PtrInfo,
                     MinAlign(64, StructOffset),
                     MachineMemOperand::MODereferenceable |
                     MachineMemOperand::MOInvariant);
}
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);

  const AMDGPUTargetMachine &TM =
    static_cast<const AMDGPUTargetMachine &>(getTargetMachine());

  // flat -> local/private
  if (ASC->getSrcAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned DestAS = ASC->getDestAddressSpace();

    if (DestAS == AMDGPUASI.LOCAL_ADDRESS ||
        DestAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(DestAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         NonNull, Ptr, SegmentNullPtr);
    }
  }
  // local/private -> flat
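  //
  // Widening pairs the 32-bit segment offset with the aperture: the
  // (offset, aperture) v2i32 built below bitcasts to an i64 whose low half is
  // the offset and whose high half is the aperture, i.e. for aperture value A
  // and segment offset Off the flat address is (A << 32) | Off, with the
  // segment null value mapped to the flat null pointer.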
  if (ASC->getDestAddressSpace() == AMDGPUASI.FLAT_ADDRESS) {
    unsigned SrcAS = ASC->getSrcAddressSpace();

    if (SrcAS == AMDGPUASI.LOCAL_ADDRESS ||
        SrcAS == AMDGPUASI.PRIVATE_ADDRESS) {
      unsigned NullVal = TM.getNullPointerValue(SrcAS);
      SDValue SegmentNullPtr = DAG.getConstant(NullVal, SL, MVT::i32);

      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), SL, DAG);
      SDValue CvtPtr
        = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
                         DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
                         FlatNullPtr);
    }
  }

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
    *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}
SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDValue Idx = Op.getOperand(2);
  if (isa<ConstantSDNode>(Idx))
    return SDValue();

  // Avoid stack access for dynamic indexing.
  SDLoc SL(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Val = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Op.getOperand(1));

  // v_bfi_b32 (v_bfm_b32 16, (shl idx, 4)), val, vec
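  //
  // e.g. for idx = 1: ScaledIdx = 16, so BFM = 0xffff0000; the new element
  // lands in the high 16 bits and the low half of the original vector is
  // preserved.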
  SDValue ExtVal = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Val);

  // Convert vector index to bit-index (multiply by the 16-bit element width,
  // i.e. shift left by 4).
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx,
                                  DAG.getConstant(4, SL, MVT::i32));

  SDValue BCVec = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

  SDValue BFM = DAG.getNode(ISD::SHL, SL, MVT::i32,
                            DAG.getConstant(0xffff, SL, MVT::i32),
                            ScaledIdx);

  // Shift the new element into position before masking it in.
  SDValue InsVal = DAG.getNode(ISD::SHL, SL, MVT::i32, ExtVal, ScaledIdx);

  SDValue LHS = DAG.getNode(ISD::AND, SL, MVT::i32, BFM, InsVal);
  SDValue RHS = DAG.getNode(ISD::AND, SL, MVT::i32,
                            DAG.getNOT(SL, BFM, MVT::i32), BCVec);

  SDValue BFI = DAG.getNode(ISD::OR, SL, MVT::i32, LHS, RHS);
  return DAG.getNode(ISD::BITCAST, SL, Op.getValueType(), BFI);
}
SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc SL(Op);

  EVT ResultVT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);

  DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);

  // Make sure we do any optimizations that will make it easier to fold
  // source modifiers before obscuring it with bit operations.

  // XXX - Why doesn't this get called when vector_shuffle is expanded?
  if (SDValue Combined = performExtractVectorEltCombine(Op.getNode(), DCI))
    return Combined;

  if (const ConstantSDNode *CIdx = dyn_cast<ConstantSDNode>(Idx)) {
    SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);

    if (CIdx->getZExtValue() == 1) {
      Result = DAG.getNode(ISD::SRL, SL, MVT::i32, Result,
                           DAG.getConstant(16, SL, MVT::i32));
    } else {
      assert(CIdx->getZExtValue() == 0);
    }

    if (ResultVT.bitsLT(MVT::i32))
      Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);

    return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
  }

  // Convert vector index to bit-index (multiply by the 16-bit element width,
  // i.e. shift left by 4; shifting by 16 would compute idx * 0x10000, which
  // is not a valid bit offset).
  SDValue Four = DAG.getConstant(4, SL, MVT::i32);
  SDValue ScaledIdx = DAG.getNode(ISD::SHL, SL, MVT::i32, Idx, Four);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
  SDValue Elt = DAG.getNode(ISD::SRL, SL, MVT::i32, BC, ScaledIdx);

  SDValue Result = Elt;
  if (ResultVT.bitsLT(MVT::i32))
    Result = DAG.getNode(ISD::TRUNCATE, SL, MVT::i16, Result);

  return DAG.getNode(ISD::BITCAST, SL, ResultVT, Result);
}
bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // We can fold offsets for anything that doesn't require a GOT relocation.
  return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
          GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
         !shouldEmitGOTReloc(GA->getGlobal());
}
static SDValue
buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
                        const SDLoc &DL, unsigned Offset, EVT PtrVT,
                        unsigned GAFlags = SIInstrInfo::MO_NONE) {
  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode is
  // lowered to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with lower 32 bits and higher 32 bits of a literal constant,
  //   which is a 64-bit pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address.
  SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags);
  SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags == SIInstrInfo::MO_NONE ?
                                             GAFlags : GAFlags + 1);
  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}
SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);

  if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  SDLoc DL(GSD);
  const GlobalValue *GV = GSD->getGlobal();
  EVT PtrVT = Op.getValueType();

  if (shouldEmitFixup(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
  else if (shouldEmitPCReloc(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
                                   SIInstrInfo::MO_REL32);

  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
                                            SIInstrInfo::MO_GOTPCREL32);

  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUASI.CONSTANT_ADDRESS);
  const DataLayout &DataLayout = DAG.getDataLayout();
  unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
  // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
                     MachineMemOperand::MODereferenceable |
                     MachineMemOperand::MOInvariant);
}
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
                                   const SDLoc &DL, SDValue V) const {
  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
  // the destination register.
  //
  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
  // so we will end up with redundant moves to m0.
  //
  // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.

  // A Null SDValue creates a glue result.
  SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
                                  V, Chain);
  return SDValue(M0, 0);
}
SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                                                 SDValue Op,
                                                 MVT VT,
                                                 unsigned Offset) const {
  SDLoc SL(Op);
  SDValue Param = lowerKernargMemParameter(DAG, MVT::i32, MVT::i32, SL,
                                           DAG.getEntryNode(), Offset, false);
  // The local size values will have the hi 16-bits as zero.
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}
static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                        EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "non-hsa intrinsic with hsa target",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
                                         EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "intrinsic not supported on subtarget",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}
SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_implicit_buffer_ptr: {
    if (getSubtarget()->isAmdCodeObjectV2(MF))
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    unsigned Reg = TRI->getPreloadedValue(MF,
                                          SIRegisterInfo::IMPLICIT_BUFFER_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdCodeObjectV2(MF)) {
      DiagnosticInfoUnsupported BadIntrin(
          *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
          DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
      SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR;
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
                                TRI->getPreloadedValue(MF, Reg), VT);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    unsigned offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
    return lowerKernArgParameterPtr(DAG, DL, DAG.getEntryNode(), offset);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    unsigned Reg
      = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);

    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rcp_legacy:
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_X, false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Y, false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::NGROUPS_Z, false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
  case Intrinsic::r600_read_global_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
  case Intrinsic::r600_read_global_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerKernargMemParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                                    SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
  case Intrinsic::r600_read_local_size_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_X);
  case Intrinsic::r600_read_local_size_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Y);
  case Intrinsic::r600_read_local_size_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return lowerImplicitZextParam(DAG, Op, MVT::i16,
                                  SI::KernelInputOffsets::LOCAL_SIZE_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
  case Intrinsic::r600_read_tgid_x:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT);
  case Intrinsic::amdgcn_workgroup_id_y:
  case Intrinsic::r600_read_tgid_y:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT);
  case Intrinsic::amdgcn_workgroup_id_z:
  case Intrinsic::r600_read_tgid_z:
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_32_XM0RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT);
  case Intrinsic::amdgcn_workitem_id_x:
  case Intrinsic::r600_read_tidig_x:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT);
  case Intrinsic::amdgcn_workitem_id_y:
  case Intrinsic::r600_read_tidig_y:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT);
  case Intrinsic::amdgcn_workitem_id_z:
  case Intrinsic::r600_read_tidig_z:
    return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
      TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT);
  case AMDGPUIntrinsic::SI_load_const: {
    SDValue Ops[] = {
      Op.getOperand(1),
      Op.getOperand(2)
    };

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
      MachineMemOperand::MOInvariant,
      VT.getStoreSize(), 4);
    return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
  case Intrinsic::amdgcn_fdiv_fast:
    return lowerFDIV_FAST(Op, DAG);
  case Intrinsic::amdgcn_interp_mov: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p1: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
    SDValue Glue = M0.getValue(1);
    return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Glue);
  }
  case Intrinsic::amdgcn_interp_p2: {
    SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
    SDValue Glue = SDValue(M0.getNode(), 1);
    return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
                       Glue);
  }
  case Intrinsic::amdgcn_sin:
    return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_cos:
    return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_log_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return SDValue();

    DiagnosticInfoUnsupported BadIntrin(
      *MF.getFunction(), "intrinsic not supported on subtarget",
      DL.getDebugLoc());
    DAG.getContext()->diagnose(BadIntrin);
    return DAG.getUNDEF(VT);
  }
  case Intrinsic::amdgcn_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::amdgcn_fract:
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case Intrinsic::amdgcn_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::amdgcn_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::amdgcn_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_div_scale: {
    // 3rd parameter required to be a constant.
    const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!Param)
      return DAG.getUNDEF(VT);

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction operand.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }
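  // For example, llvm.amdgcn.icmp(a, b, 32) uses predicate 32
  // (ICmpInst::ICMP_EQ), which maps to ISD::SETEQ below; the AMDGPUISD::SETCC
  // node then produces a 64-bit mask of the lanes for which the comparison
  // held.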
  case Intrinsic::amdgcn_icmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
        CondCode > ICmpInst::Predicate::LAST_ICMP_PREDICATE)
      return DAG.getUNDEF(VT);

    ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fcmp: {
    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!CD)
      return DAG.getUNDEF(VT);

    int CondCode = CD->getSExtValue();
    if (CondCode < FCmpInst::Predicate::FIRST_FCMP_PREDICATE ||
        CondCode > FCmpInst::Predicate::LAST_FCMP_PREDICATE)
      return DAG.getUNDEF(VT);

    FCmpInst::Predicate IcInput = static_cast<FCmpInst::Predicate>(CondCode);
    ISD::CondCode CCOpcode = getFCmpCondCode(IcInput);
    return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
                       Op.getOperand(2), DAG.getCondCode(CCOpcode));
  }
  case Intrinsic::amdgcn_fmed3:
    return DAG.getNode(AMDGPUISD::FMED3, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_fmul_legacy:
    return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::amdgcn_sffbh:
    return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_sbfe:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_ubfe:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::amdgcn_cvt_pkrtz: {
    // FIXME: Stop adding cast if v2f16 legal.
    EVT VT = Op.getValueType();
    SDValue Node = DAG.getNode(AMDGPUISD::CVT_PKRTZ_F16_F32, DL, MVT::i32,
                               Op.getOperand(1), Op.getOperand(2));
    return DAG.getNode(ISD::BITCAST, DL, VT, Node);
  }
  default:
    return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  }
}
SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {
  unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc DL(Op);
  MachineFunction &MF = DAG.getMachineFunction();

  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec: {
    MemSDNode *M = cast<MemSDNode>(Op);
    unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
      AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
    SDValue Ops[] = {
      M->getOperand(0), // Chain
      M->getOperand(2), // Ptr
      M->getOperand(3)  // Value
    };

    return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
                                   M->getMemoryVT(), M->getMemOperand());
  }
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format: {
    SDValue Ops[] = {
      Op.getOperand(0), // Chain
      Op.getOperand(2), // rsrc
      Op.getOperand(3), // vindex
      Op.getOperand(4), // offset
      Op.getOperand(5), // glc
      Op.getOperand(6)  // slc
    };
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

    unsigned Opc = (IntrID == Intrinsic::amdgcn_buffer_load) ?
      AMDGPUISD::BUFFER_LOAD : AMDGPUISD::BUFFER_LOAD_FORMAT;
    EVT VT = Op.getValueType();
    EVT IntVT = VT.changeTypeToInteger();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(MFI->getBufferPSV()),
      MachineMemOperand::MOLoad,
      VT.getStoreSize(), VT.getStoreSize());

    return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(), Ops, IntVT, MMO);
  }
  case Intrinsic::amdgcn_tbuffer_load: {
    SDValue Ops[] = {
      Op.getOperand(0),  // Chain
      Op.getOperand(2),  // rsrc
      Op.getOperand(3),  // vindex
      Op.getOperand(4),  // voffset
      Op.getOperand(5),  // soffset
      Op.getOperand(6),  // offset
      Op.getOperand(7),  // dfmt
      Op.getOperand(8),  // nfmt
      Op.getOperand(9),  // glc
      Op.getOperand(10)  // slc
    };

    EVT VT = Op.getOperand(2).getValueType();

    MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad,
      VT.getStoreSize(), VT.getStoreSize());
    return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_LOAD_FORMAT, DL,
                                   Op->getVTList(), Ops, VT, MMO);
  }
3364 case Intrinsic::amdgcn_image_sample:
3365 case Intrinsic::amdgcn_image_sample_cl:
3366 case Intrinsic::amdgcn_image_sample_d:
3367 case Intrinsic::amdgcn_image_sample_d_cl:
3368 case Intrinsic::amdgcn_image_sample_l:
3369 case Intrinsic::amdgcn_image_sample_b:
3370 case Intrinsic::amdgcn_image_sample_b_cl:
3371 case Intrinsic::amdgcn_image_sample_lz:
3372 case Intrinsic::amdgcn_image_sample_cd:
3373 case Intrinsic::amdgcn_image_sample_cd_cl:
3375 // Sample with comparison.
3376 case Intrinsic::amdgcn_image_sample_c:
3377 case Intrinsic::amdgcn_image_sample_c_cl:
3378 case Intrinsic::amdgcn_image_sample_c_d:
3379 case Intrinsic::amdgcn_image_sample_c_d_cl:
3380 case Intrinsic::amdgcn_image_sample_c_l:
3381 case Intrinsic::amdgcn_image_sample_c_b:
3382 case Intrinsic::amdgcn_image_sample_c_b_cl:
3383 case Intrinsic::amdgcn_image_sample_c_lz:
3384 case Intrinsic::amdgcn_image_sample_c_cd:
3385 case Intrinsic::amdgcn_image_sample_c_cd_cl:
3387 // Sample with offsets.
3388 case Intrinsic::amdgcn_image_sample_o:
3389 case Intrinsic::amdgcn_image_sample_cl_o:
3390 case Intrinsic::amdgcn_image_sample_d_o:
3391 case Intrinsic::amdgcn_image_sample_d_cl_o:
3392 case Intrinsic::amdgcn_image_sample_l_o:
3393 case Intrinsic::amdgcn_image_sample_b_o:
3394 case Intrinsic::amdgcn_image_sample_b_cl_o:
3395 case Intrinsic::amdgcn_image_sample_lz_o:
3396 case Intrinsic::amdgcn_image_sample_cd_o:
3397 case Intrinsic::amdgcn_image_sample_cd_cl_o:
3399 // Sample with comparison and offsets.
3400 case Intrinsic::amdgcn_image_sample_c_o:
3401 case Intrinsic::amdgcn_image_sample_c_cl_o:
3402 case Intrinsic::amdgcn_image_sample_c_d_o:
3403 case Intrinsic::amdgcn_image_sample_c_d_cl_o:
3404 case Intrinsic::amdgcn_image_sample_c_l_o:
3405 case Intrinsic::amdgcn_image_sample_c_b_o:
3406 case Intrinsic::amdgcn_image_sample_c_b_cl_o:
3407 case Intrinsic::amdgcn_image_sample_c_lz_o:
3408 case Intrinsic::amdgcn_image_sample_c_cd_o:
3409 case Intrinsic::amdgcn_image_sample_c_cd_cl_o:
3411 case Intrinsic::amdgcn_image_getlod: {
3412 // If the dmask has every channel disabled, replace the result with undef.
3413 const ConstantSDNode *DMask = dyn_cast<ConstantSDNode>(Op.getOperand(5));
3414 if (!DMask || DMask->isNullValue()) {
3415 SDValue Undef = DAG.getUNDEF(Op.getValueType());
3416 return DAG.getMergeValues({ Undef, Op.getOperand(0) }, SDLoc(Op));
3426 SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
3427 SelectionDAG &DAG) const {
3429 SDValue Chain = Op.getOperand(0);
3430 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
3431 MachineFunction &MF = DAG.getMachineFunction();
3433 switch (IntrinsicID) {
3434 case Intrinsic::amdgcn_exp: {
3435 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
3436 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
3437 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(8));
3438 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(9));
3440 const SDValue Ops[] = {
3442 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
3443 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
3444 Op.getOperand(4), // src0
3445 Op.getOperand(5), // src1
3446 Op.getOperand(6), // src2
3447 Op.getOperand(7), // src3
3448 DAG.getTargetConstant(0, DL, MVT::i1), // compr
3449 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
3452 unsigned Opc = Done->isNullValue() ?
3453 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
3454 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
3456 case Intrinsic::amdgcn_exp_compr: {
3457 const ConstantSDNode *Tgt = cast<ConstantSDNode>(Op.getOperand(2));
3458 const ConstantSDNode *En = cast<ConstantSDNode>(Op.getOperand(3));
3459 SDValue Src0 = Op.getOperand(4);
3460 SDValue Src1 = Op.getOperand(5);
3461 const ConstantSDNode *Done = cast<ConstantSDNode>(Op.getOperand(6));
3462 const ConstantSDNode *VM = cast<ConstantSDNode>(Op.getOperand(7));
3464 SDValue Undef = DAG.getUNDEF(MVT::f32);
3465 const SDValue Ops[] = {
3467 DAG.getTargetConstant(Tgt->getZExtValue(), DL, MVT::i8), // tgt
3468 DAG.getTargetConstant(En->getZExtValue(), DL, MVT::i8), // en
3469 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src0),
3470 DAG.getNode(ISD::BITCAST, DL, MVT::f32, Src1),
3473 DAG.getTargetConstant(1, DL, MVT::i1), // compr
3474 DAG.getTargetConstant(VM->getZExtValue(), DL, MVT::i1)
3477 unsigned Opc = Done->isNullValue() ?
3478 AMDGPUISD::EXPORT : AMDGPUISD::EXPORT_DONE;
3479 return DAG.getNode(Opc, DL, Op->getVTList(), Ops);
3481 case Intrinsic::amdgcn_s_sendmsg:
3482 case Intrinsic::amdgcn_s_sendmsghalt: {
3483 unsigned NodeOp = (IntrinsicID == Intrinsic::amdgcn_s_sendmsg) ?
3484 AMDGPUISD::SENDMSG : AMDGPUISD::SENDMSGHALT;
3485 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
3486 SDValue Glue = Chain.getValue(1);
3487 return DAG.getNode(NodeOp, DL, MVT::Other, Chain,
3488 Op.getOperand(2), Glue);
3490 case Intrinsic::amdgcn_init_exec: {
3491 return DAG.getNode(AMDGPUISD::INIT_EXEC, DL, MVT::Other, Chain,
3494 case Intrinsic::amdgcn_init_exec_from_input: {
3495 return DAG.getNode(AMDGPUISD::INIT_EXEC_FROM_INPUT, DL, MVT::Other, Chain,
3496 Op.getOperand(2), Op.getOperand(3));
3498 case AMDGPUIntrinsic::AMDGPU_kill: {
3499 SDValue Src = Op.getOperand(2);
3500 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
3501 if (!K->isNegative())
3504 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
3505 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
3508 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
3509 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
3511 case Intrinsic::amdgcn_s_barrier: {
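// A barrier is a no-op when the whole workgroup fits in a single wave, since
// a wave executes in lockstep; only the scheduling barrier (WAVE_BARRIER)
// needs to be kept so later passes do not move code across this point.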
3512 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
3513 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
3514 unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
3515 if (WGSize <= ST.getWavefrontSize())
3516 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
3517 Op.getOperand(0)), 0);
3521 case AMDGPUIntrinsic::SI_tbuffer_store: {
3523 // Extract vindex and voffset from vaddr as appropriate
3524 const ConstantSDNode *OffEn = cast<ConstantSDNode>(Op.getOperand(10));
3525 const ConstantSDNode *IdxEn = cast<ConstantSDNode>(Op.getOperand(11));
3526 SDValue VAddr = Op.getOperand(5);
3528 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
3530 assert(!(OffEn->isOne() && IdxEn->isOne()) &&
3531 "Legacy intrinsic doesn't support both offset and index - use new version");
3533 SDValue VIndex = IdxEn->isOne() ? VAddr : Zero;
3534 SDValue VOffset = OffEn->isOne() ? VAddr : Zero;
3536 // Deal with the vec-3 case
3537 const ConstantSDNode *NumChannels = cast<ConstantSDNode>(Op.getOperand(4));
3538 auto Opcode = NumChannels->getZExtValue() == 3 ?
3539 AMDGPUISD::TBUFFER_STORE_FORMAT_X3 : AMDGPUISD::TBUFFER_STORE_FORMAT;
3543 Op.getOperand(3), // vdata
3544 Op.getOperand(2), // rsrc
3547 Op.getOperand(6), // soffset
3548 Op.getOperand(7), // inst_offset
3549 Op.getOperand(8), // dfmt
3550 Op.getOperand(9), // nfmt
3551 Op.getOperand(12), // glc
3552 Op.getOperand(13), // slc
3555 assert((cast<ConstantSDNode>(Op.getOperand(14)))->getZExtValue() == 0 &&
3556 "Value of tfe other than zero is unsupported");
3558 EVT VT = Op.getOperand(3).getValueType();
3559 MachineMemOperand *MMO = MF.getMachineMemOperand(
3560 MachinePointerInfo(),
3561 MachineMemOperand::MOStore,
3562 VT.getStoreSize(), 4);
3563 return DAG.getMemIntrinsicNode(Opcode, DL,
3564 Op->getVTList(), Ops, VT, MMO);
3567 case Intrinsic::amdgcn_tbuffer_store: {
3570 Op.getOperand(2), // vdata
3571 Op.getOperand(3), // rsrc
3572 Op.getOperand(4), // vindex
3573 Op.getOperand(5), // voffset
3574 Op.getOperand(6), // soffset
3575 Op.getOperand(7), // offset
3576 Op.getOperand(8), // dfmt
3577 Op.getOperand(9), // nfmt
3578 Op.getOperand(10), // glc
3579 Op.getOperand(11) // slc
3581 EVT VT = Op.getOperand(3).getValueType();
3582 MachineMemOperand *MMO = MF.getMachineMemOperand(
3583 MachinePointerInfo(),
3584 MachineMemOperand::MOStore,
3585 VT.getStoreSize(), 4);
3586 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
3587 Op->getVTList(), Ops, VT, MMO);
3595 SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
3597 LoadSDNode *Load = cast<LoadSDNode>(Op);
3598 ISD::LoadExtType ExtType = Load->getExtensionType();
3599 EVT MemVT = Load->getMemoryVT();
3601 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
3602 // FIXME: Copied from PPC
3603 // First, load into 32 bits, then truncate to 1 bit.
3605 SDValue Chain = Load->getChain();
3606 SDValue BasePtr = Load->getBasePtr();
3607 MachineMemOperand *MMO = Load->getMemOperand();
3609 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
3611 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
3612 BasePtr, RealMemVT, MMO);
3615 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
3619 return DAG.getMergeValues(Ops, DL);
3622 if (!MemVT.isVector())
3625 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
3626 "Custom lowering for non-i32 vectors hasn't been implemented.");
3628 unsigned AS = Load->getAddressSpace();
3629 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
3630 AS, Load->getAlignment())) {
3632 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
3633 return DAG.getMergeValues(Ops, DL);
3636 MachineFunction &MF = DAG.getMachineFunction();
3637 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3638 // If there is a possibility that flat instructions access scratch memory
3639 // then we need to use the same legalization rules we use for private.
3640 if (AS == AMDGPUASI.FLAT_ADDRESS)
3641 AS = MFI->hasFlatScratchInit() ?
3642 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
3644 unsigned NumElements = MemVT.getVectorNumElements();
3645 if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
3646 if (isMemOpUniform(Load))
3648 // Non-uniform loads will be selected to MUBUF instructions, so they
3649 // have the same legalization requirements as global and private loads.
3653 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) {
3654 if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
3655 !Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load))
3657 // Non-uniform loads will be selected to MUBUF instructions, so they
3658 // have the same legalization requirements as global and private loads.
3662 if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS ||
3663 AS == AMDGPUASI.FLAT_ADDRESS) {
3664 if (NumElements > 4)
3665 return SplitVectorLoad(Op, DAG);
3666 // v4 loads are supported for private and global memory.
3669 if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
3670 // Depending on the setting of the private_element_size field in the
3671 // resource descriptor, we can only make private accesses up to a certain size.
3673 switch (Subtarget->getMaxPrivateElementSize()) {
3675 return scalarizeVectorLoad(Load, DAG);
3677 if (NumElements > 2)
3678 return SplitVectorLoad(Op, DAG);
3681 // Same as global/flat
3682 if (NumElements > 4)
3683 return SplitVectorLoad(Op, DAG);
3686 llvm_unreachable("unsupported private_element_size");
3688 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
3689 if (NumElements > 2)
3690 return SplitVectorLoad(Op, DAG);
3692 if (NumElements == 2)
3695 // If properly aligned, splitting might let us use ds_read_b64.
3696 return SplitVectorLoad(Op, DAG);
3701 SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
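// Split the i64 select into two i32 selects on the low and high halves and
// recombine the halves with a build_vector + bitcast; v_cndmask_b32 only
// operates on 32-bit values, so this maps directly onto the hardware.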
3702 if (Op.getValueType() != MVT::i64)
3706 SDValue Cond = Op.getOperand(0);
3708 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
3709 SDValue One = DAG.getConstant(1, DL, MVT::i32);
3711 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
3712 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
3714 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
3715 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
3717 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
3719 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
3720 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
3722 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
3724 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
3725 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
3728 // Catch division cases where we can use shortcuts with rcp and rsq
3730 SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
3731 SelectionDAG &DAG) const {
3733 SDValue LHS = Op.getOperand(0);
3734 SDValue RHS = Op.getOperand(1);
3735 EVT VT = Op.getValueType();
3736 const SDNodeFlags Flags = Op->getFlags();
3737 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath ||
3738 Flags.hasUnsafeAlgebra() || Flags.hasAllowReciprocal();
3740 if (!Unsafe && VT == MVT::f32 && Subtarget->hasFP32Denormals())
3743 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
3744 if (Unsafe || VT == MVT::f32 || VT == MVT::f16) {
3745 if (CLHS->isExactlyValue(1.0)) {
3746 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
3747 // the CI documentation have a worst case error of 1 ulp.
3748 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
3749 // use it as long as we aren't trying to use denormals.
3751 // v_rcp_f16 and v_rsq_f16 DO support denormals.
3753 // 1.0 / sqrt(x) -> rsq(x)
3755 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
3756 // error seems really high at 2^29 ULP.
3757 if (RHS.getOpcode() == ISD::FSQRT)
3758 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
3760 // 1.0 / x -> rcp(x)
3761 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
3764 // Same as for 1.0, but expand the sign out of the constant.
3765 if (CLHS->isExactlyValue(-1.0)) {
3766 // -1.0 / x -> rcp (fneg x)
3767 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3768 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
3774 // Turn into multiply by the reciprocal.
3775 // x / y -> x * (1.0 / y)
3776 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
3777 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, Flags);
3783 static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
3784 EVT VT, SDValue A, SDValue B, SDValue GlueChain) {
3785 if (GlueChain->getNumValues() <= 1) {
3786 return DAG.getNode(Opcode, SL, VT, A, B);
3789 assert(GlueChain->getNumValues() == 3);
3791 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
3793 default: llvm_unreachable("no chain equivalent for opcode");
3795 Opcode = AMDGPUISD::FMUL_W_CHAIN;
3799 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B,
3800 GlueChain.getValue(2));
3803 static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
3804 EVT VT, SDValue A, SDValue B, SDValue C,
3805 SDValue GlueChain) {
3806 if (GlueChain->getNumValues() <= 1) {
3807 return DAG.getNode(Opcode, SL, VT, A, B, C);
3810 assert(GlueChain->getNumValues() == 3);
3812 SDVTList VTList = DAG.getVTList(VT, MVT::Other, MVT::Glue);
3814 default: llvm_unreachable("no chain equivalent for opcode");
3816 Opcode = AMDGPUISD::FMA_W_CHAIN;
3820 return DAG.getNode(Opcode, SL, VTList, GlueChain.getValue(1), A, B, C,
3821 GlueChain.getValue(2));
3824 SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const {
3825 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
3829 SDValue Src0 = Op.getOperand(0);
3830 SDValue Src1 = Op.getOperand(1);
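// f16 division is carried out in f32: extend both operands, multiply the
// numerator by rcp of the denominator, round the quotient back to f16, and
// let div_fixup handle the special cases (zeros, infinities, NaNs).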
3832 SDValue CvtSrc0 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src0);
3833 SDValue CvtSrc1 = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src1);
3835 SDValue RcpSrc1 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, CvtSrc1);
3836 SDValue Quot = DAG.getNode(ISD::FMUL, SL, MVT::f32, CvtSrc0, RcpSrc1);
3838 SDValue FPRoundFlag = DAG.getTargetConstant(0, SL, MVT::i32);
3839 SDValue BestQuot = DAG.getNode(ISD::FP_ROUND, SL, MVT::f16, Quot, FPRoundFlag);
3841 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f16, BestQuot, Src1, Src0);
3844 // Faster 2.5 ULP division that does not support denormals.
3845 SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
3847 SDValue LHS = Op.getOperand(1);
3848 SDValue RHS = Op.getOperand(2);
3850 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
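// The two magic constants below are 2^+96 (0x6f800000) and 2^-32 (0x2f800000)
// as IEEE-754 single-precision bit patterns. When |RHS| > 2^96, the
// denominator is pre-scaled by 2^-32 to keep rcp's input in range, and the
// final quotient is multiplied by the same factor to compensate.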
3852 const APFloat K0Val(BitsToFloat(0x6f800000));
3853 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
3855 const APFloat K1Val(BitsToFloat(0x2f800000));
3856 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
3858 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
3861 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
3863 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
3865 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
3867 // TODO: Should this propagate fast-math-flags?
3868 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
3870 // rcp does not support denormals.
3871 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
3873 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
3875 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
3878 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
3879 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
3883 SDValue LHS = Op.getOperand(0);
3884 SDValue RHS = Op.getOperand(1);
3886 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
3888 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
3890 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
3892 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT,
3895 // Denominator is scaled to not be denormal, so using rcp is ok.
3896 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32,
3898 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32,
3901 const unsigned Denorm32Reg = AMDGPU::Hwreg::ID_MODE |
3902 (4 << AMDGPU::Hwreg::OFFSET_SHIFT_) |
3903 (1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_);
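// This selects a 2-bit field at offset 4 of the MODE register, which (as I
// read the MODE register layout) holds the FP32 denormal-mode bits; it is
// toggled around the FMA chain below when FP32 denormals are disabled.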
3905 const SDValue BitField = DAG.getTargetConstant(Denorm32Reg, SL, MVT::i16);
3907 if (!Subtarget->hasFP32Denormals()) {
3908 SDVTList BindParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
3909 const SDValue EnableDenormValue = DAG.getConstant(FP_DENORM_FLUSH_NONE,
3911 SDValue EnableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, BindParamVTs,
3913 EnableDenormValue, BitField);
3916 EnableDenorm.getValue(0),
3917 EnableDenorm.getValue(1)
3920 NegDivScale0 = DAG.getMergeValues(Ops, SL);
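// A sketch of the Newton-Raphson refinement implemented by the FMA chain
// below (r = ApproxRcp, d = scaled denominator, n = scaled numerator):
//   e0 = 1 - d * r        (Fma0)
//   r1 = r + r * e0       (Fma1)
//   q0 = n * r1           (Mul)
//   e1 = n - d * q0       (Fma2)
//   q1 = q0 + r1 * e1     (Fma3)
//   e2 = n - d * q1       (Fma4, the residual consumed by div_fmas)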
3923 SDValue Fma0 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0,
3924 ApproxRcp, One, NegDivScale0);
3926 SDValue Fma1 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp,
3929 SDValue Mul = getFPBinOp(DAG, ISD::FMUL, SL, MVT::f32, NumeratorScaled,
3932 SDValue Fma2 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Mul,
3933 NumeratorScaled, Mul);
3935 SDValue Fma3 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul, Fma2);
3937 SDValue Fma4 = getFPTernOp(DAG, ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3,
3938 NumeratorScaled, Fma3);
3940 if (!Subtarget->hasFP32Denormals()) {
3941 const SDValue DisableDenormValue =
3942 DAG.getConstant(FP_DENORM_FLUSH_IN_FLUSH_OUT, SL, MVT::i32);
3943 SDValue DisableDenorm = DAG.getNode(AMDGPUISD::SETREG, SL, MVT::Other,
3949 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
3950 DisableDenorm, DAG.getRoot());
3951 DAG.setRoot(OutputChain);
3954 SDValue Scale = NumeratorScaled.getValue(1);
3955 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32,
3956 Fma4, Fma1, Fma3, Scale);
3958 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
3961 SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
3962 if (DAG.getTarget().Options.UnsafeFPMath)
3963 return lowerFastUnsafeFDIV(Op, DAG);
3966 SDValue X = Op.getOperand(0);
3967 SDValue Y = Op.getOperand(1);
3969 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
3971 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
3973 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
3975 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
3977 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
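// Same refinement scheme as in LowerFDIV32, in double precision: Fma0/Fma1
// apply one Newton-Raphson step to Rcp, Fma2/Fma3 apply a second one, Mul
// forms the scaled quotient, and Fma4 computes the residual for div_fmas.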
3979 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
3981 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
3983 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
3985 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
3987 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
3988 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
3990 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
3991 NegDivScale0, Mul, DivScale1);
3995 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
3996 // Workaround a hardware bug on SI where the condition output from div_scale is not usable.
3999 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
4001 // Figure out the scale to use for div_fmas.
4002 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
4003 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
4004 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
4005 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
4007 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
4008 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
4011 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
4013 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
4015 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
4016 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
4017 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
4019 Scale = DivScale1.getValue(1);
4022 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
4023 Fma4, Fma3, Mul, Scale);
4025 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
4028 SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
4029 EVT VT = Op.getValueType();
4032 return LowerFDIV32(Op, DAG);
4035 return LowerFDIV64(Op, DAG);
4038 return LowerFDIV16(Op, DAG);
4040 llvm_unreachable("Unexpected type for fdiv");
4043 SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
4045 StoreSDNode *Store = cast<StoreSDNode>(Op);
4046 EVT VT = Store->getMemoryVT();
4048 if (VT == MVT::i1) {
4049 return DAG.getTruncStore(Store->getChain(), DL,
4050 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
4051 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
4054 assert(VT.isVector() &&
4055 Store->getValue().getValueType().getScalarType() == MVT::i32);
4057 unsigned AS = Store->getAddressSpace();
4058 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
4059 AS, Store->getAlignment())) {
4060 return expandUnalignedStore(Store, DAG);
4063 MachineFunction &MF = DAG.getMachineFunction();
4064 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
4065 // If there is a possibility that flat instructions access scratch memory
4066 // then we need to use the same legalization rules we use for private.
4067 if (AS == AMDGPUASI.FLAT_ADDRESS)
4068 AS = MFI->hasFlatScratchInit() ?
4069 AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
4071 unsigned NumElements = VT.getVectorNumElements();
4072 if (AS == AMDGPUASI.GLOBAL_ADDRESS ||
4073 AS == AMDGPUASI.FLAT_ADDRESS) {
4074 if (NumElements > 4)
4075 return SplitVectorStore(Op, DAG);
4077 } else if (AS == AMDGPUASI.PRIVATE_ADDRESS) {
4078 switch (Subtarget->getMaxPrivateElementSize()) {
4080 return scalarizeVectorStore(Store, DAG);
4082 if (NumElements > 2)
4083 return SplitVectorStore(Op, DAG);
4086 if (NumElements > 4)
4087 return SplitVectorStore(Op, DAG);
4090 llvm_unreachable("unsupported private_element_size");
4092 } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
4093 if (NumElements > 2)
4094 return SplitVectorStore(Op, DAG);
4096 if (NumElements == 2)
4099 // If properly aligned, splitting might let us use ds_write_b64.
4100 return SplitVectorStore(Op, DAG);
4102 llvm_unreachable("unhandled address space");
4106 SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
4108 EVT VT = Op.getValueType();
4109 SDValue Arg = Op.getOperand(0);
4110 // TODO: Should this propagate fast-math-flags?
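// The hardware sin/cos units take the angle as a fraction of a full turn, so
// convert from radians by multiplying with 1/(2*pi) and taking the
// fractional part before emitting SIN_HW/COS_HW.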
4111 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
4112 DAG.getNode(ISD::FMUL, DL, VT, Arg,
4113 DAG.getConstantFP(0.5/M_PI, DL,
4116 switch (Op.getOpcode()) {
4118 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
4120 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
4122 llvm_unreachable("Wrong trig opcode");
4126 SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
4127 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
4128 assert(AtomicNode->isCompareAndSwap());
4129 unsigned AS = AtomicNode->getAddressSpace();
4131 // No custom lowering required for local address space
4132 if (!isFlatGlobalAddrSpace(AS, AMDGPUASI))
4135 // Non-local address space requires custom lowering for atomic compare
4136 // and swap; cmp and swap should be in a v2i32 or v2i64 in case of _X2
4138 SDValue ChainIn = Op.getOperand(0);
4139 SDValue Addr = Op.getOperand(1);
4140 SDValue Old = Op.getOperand(2);
4141 SDValue New = Op.getOperand(3);
4142 EVT VT = Op.getValueType();
4143 MVT SimpleVT = VT.getSimpleVT();
4144 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
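// Pack {new, old} into adjacent lanes: to my reading of the ISA, the cmpswap
// instructions take the swap data in the first register(s) and the compare
// value immediately after, hence v2i32 (or v2i64 for the _X2 variants).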
4146 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
4147 SDValue Ops[] = { ChainIn, Addr, NewOld };
4149 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
4150 Ops, VT, AtomicNode->getMemOperand());
4153 //===----------------------------------------------------------------------===//
4154 // Custom DAG optimizations
4155 //===----------------------------------------------------------------------===//
4157 SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
4158 DAGCombinerInfo &DCI) const {
4159 EVT VT = N->getValueType(0);
4160 EVT ScalarVT = VT.getScalarType();
4161 if (ScalarVT != MVT::f32)
4164 SelectionDAG &DAG = DCI.DAG;
4167 SDValue Src = N->getOperand(0);
4168 EVT SrcVT = Src.getValueType();
4170 // TODO: We could try to match extracting the higher bytes, which would be
4171 // easier if i8 vectors weren't promoted to i32 vectors, particularly after
4172 // types are legalized. v4i8 -> v4f32 is probably the only case to worry
4173 // about in practice.
4174 if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
4175 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
4176 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
4177 DCI.AddToWorklist(Cvt.getNode());
4185 /// \brief Return true if the given offset Size in bytes can be folded into
4186 /// the immediate offsets of a memory instruction for the given address space.
4187 static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
4188 const SISubtarget &STI) {
4189 auto AMDGPUASI = STI.getAMDGPUAS();
4190 if (AS == AMDGPUASI.GLOBAL_ADDRESS) {
4191 // MUBUF instructions have a 12-bit offset in bytes.
4192 return isUInt<12>(OffsetSize);
4194 if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
4195 // SMRD instructions have an 8-bit offset in dwords on SI and
4196 // a 20-bit offset in bytes on VI.
4197 if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
4198 return isUInt<20>(OffsetSize);
4200 return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
4202 if (AS == AMDGPUASI.LOCAL_ADDRESS ||
4203 AS == AMDGPUASI.REGION_ADDRESS) {
4204 // The single offset versions have a 16-bit offset in bytes.
4205 return isUInt<16>(OffsetSize);
4207 // Indirect register addressing does not use any offsets.
4211 // (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
4213 // This is a variant of
4214 // (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
4216 // The normal DAG combiner will do this, but only if the add has one use, since
4217 // duplicating the add would increase the number of instructions.
4219 // This prevents us from seeing a constant offset that can be folded into a
4220 // memory instruction's addressing mode. If we know the resulting add offset of
4221 // a pointer can be folded into an addressing offset, we can replace the pointer
4222 // operand with the add of the new constant offset. This eliminates one of the uses,
4223 // and may allow the remaining use to also be simplified.
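// For example (an illustrative case): a load from ((x + 12) << 2) becomes a
// load from ((x << 2) + 48), and the +48 can then be folded into the memory
// instruction's immediate offset field whenever canFoldOffset allows it.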
4225 SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
4227 DAGCombinerInfo &DCI) const {
4228 SDValue N0 = N->getOperand(0);
4229 SDValue N1 = N->getOperand(1);
4231 if (N0.getOpcode() != ISD::ADD)
4234 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
4238 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
4242 // If the resulting offset is too large, we can't fold it into the addressing
4244 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
4245 if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget()))
4248 SelectionDAG &DAG = DCI.DAG;
4250 EVT VT = N->getValueType(0);
4252 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
4253 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
4255 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
4258 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
4259 DAGCombinerInfo &DCI) const {
4260 SDValue Ptr = N->getBasePtr();
4261 SelectionDAG &DAG = DCI.DAG;
4264 // TODO: We could also do this for multiplies.
4265 unsigned AS = N->getAddressSpace();
4266 if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUASI.PRIVATE_ADDRESS) {
4267 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
4269 SmallVector<SDValue, 8> NewOps(N->op_begin(), N->op_end());
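// Stores are (chain, value, ptr, ...) while loads are (chain, ptr, ...), so
// the pointer operand index differs between the two.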
4271 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
4272 return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
4279 static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
4280 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
4281 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
4282 (Opc == ISD::XOR && Val == 0);
4285 // Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
4286 // will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
4287 // integer combine opportunities since most 64-bit operations are decomposed
4288 // this way. TODO: We won't want this for SALU especially if it is an inline immediate.
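// For example, (and i64:x, 0xFFFFFFFF00000000) splits into an i32 and of the
// high half with -1 (which folds away) and an i32 and of the low half with 0
// (which folds to zero), avoiding the 64-bit immediate entirely.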
4290 SDValue SITargetLowering::splitBinaryBitConstantOp(
4291 DAGCombinerInfo &DCI,
4293 unsigned Opc, SDValue LHS,
4294 const ConstantSDNode *CRHS) const {
4295 uint64_t Val = CRHS->getZExtValue();
4296 uint32_t ValLo = Lo_32(Val);
4297 uint32_t ValHi = Hi_32(Val);
4298 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
4300 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
4301 bitOpWithConstantIsReducible(Opc, ValHi)) ||
4302 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
4303 // If we need to materialize a 64-bit immediate, it will be split up later
4304 // anyway. Avoid creating the harder to understand 64-bit immediate materialization.
4306 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
4312 // Returns true if the argument is a boolean value which is not serialized into
4313 // memory or as an argument and does not require v_cndmask_b32 to be deserialized.
4314 static bool isBoolSGPR(SDValue V) {
4315 if (V.getValueType() != MVT::i1)
4317 switch (V.getOpcode()) {
4323 case AMDGPUISD::FP_CLASS:
4329 SDValue SITargetLowering::performAndCombine(SDNode *N,
4330 DAGCombinerInfo &DCI) const {
4331 if (DCI.isBeforeLegalize())
4334 SelectionDAG &DAG = DCI.DAG;
4335 EVT VT = N->getValueType(0);
4336 SDValue LHS = N->getOperand(0);
4337 SDValue RHS = N->getOperand(1);
4340 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
4341 if (VT == MVT::i64 && CRHS) {
4343 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
4347 if (CRHS && VT == MVT::i32) {
4348 // and (srl x, c), mask => shl (bfe x, nb + c, mask >> nb), nb
4349 // nb = number of trailing zeroes in mask
4350 // It can be optimized out using SDWA for GFX8+ in the SDWA peephole pass,
4351 // given that we are selecting 8- or 16-bit fields starting at a byte boundary.
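// For example (an illustrative case): (and (srl x, 8), 0xff00) selects bits
// [16,24) of x shifted left by 8, so it becomes (shl (bfe x, 16, 8), 8) -- a
// byte-aligned field access in the form the SDWA peephole can pick up.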
4352 uint64_t Mask = CRHS->getZExtValue();
4353 unsigned Bits = countPopulation(Mask);
4354 if (getSubtarget()->hasSDWA() && LHS->getOpcode() == ISD::SRL &&
4355 (Bits == 8 || Bits == 16) && isShiftedMask_64(Mask) && !(Mask & 1)) {
4356 if (auto *CShift = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
4357 unsigned Shift = CShift->getZExtValue();
4358 unsigned NB = CRHS->getAPIntValue().countTrailingZeros();
4359 unsigned Offset = NB + Shift;
4360 if ((Offset & (Bits - 1)) == 0) { // Starts at a byte or word boundary.
4362 SDValue BFE = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
4364 DAG.getConstant(Offset, SL, MVT::i32),
4365 DAG.getConstant(Bits, SL, MVT::i32));
4366 EVT NarrowVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
4367 SDValue Ext = DAG.getNode(ISD::AssertZext, SL, VT, BFE,
4368 DAG.getValueType(NarrowVT));
4369 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(LHS), VT, Ext,
4370 DAG.getConstant(NB, SDLoc(CRHS), MVT::i32));
4377 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
4378 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
4379 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
4380 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
4381 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
4383 SDValue X = LHS.getOperand(0);
4384 SDValue Y = RHS.getOperand(0);
4385 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
4388 if (LCC == ISD::SETO) {
4389 if (X != LHS.getOperand(1))
4392 if (RCC == ISD::SETUNE) {
4393 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
4394 if (!C1 || !C1->isInfinity() || C1->isNegative())
4397 const uint32_t Mask = SIInstrFlags::N_NORMAL |
4398 SIInstrFlags::N_SUBNORMAL |
4399 SIInstrFlags::N_ZERO |
4400 SIInstrFlags::P_ZERO |
4401 SIInstrFlags::P_SUBNORMAL |
4402 SIInstrFlags::P_NORMAL;
4404 static_assert(((~(SIInstrFlags::S_NAN |
4405 SIInstrFlags::Q_NAN |
4406 SIInstrFlags::N_INFINITY |
4407 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
4411 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
4412 X, DAG.getConstant(Mask, DL, MVT::i32));
4417 if (VT == MVT::i32 &&
4418 (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
4419 // and x, (sext cc from i1) => select cc, x, 0
4420 if (RHS.getOpcode() != ISD::SIGN_EXTEND)
4421 std::swap(LHS, RHS);
4422 if (isBoolSGPR(RHS.getOperand(0)))
4423 return DAG.getSelect(SDLoc(N), MVT::i32, RHS.getOperand(0),
4424 LHS, DAG.getConstant(0, SDLoc(N), MVT::i32));
4430 SDValue SITargetLowering::performOrCombine(SDNode *N,
4431 DAGCombinerInfo &DCI) const {
4432 SelectionDAG &DAG = DCI.DAG;
4433 SDValue LHS = N->getOperand(0);
4434 SDValue RHS = N->getOperand(1);
4436 EVT VT = N->getValueType(0);
4437 if (VT == MVT::i1) {
4438 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
4439 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
4440 RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
4441 SDValue Src = LHS.getOperand(0);
4442 if (Src != RHS.getOperand(0))
4445 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
4446 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
4450 // Only 10 bits are used.
4451 static const uint32_t MaxMask = 0x3ff;
4453 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
4455 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
4456 Src, DAG.getConstant(NewMask, DL, MVT::i32));
4465 // TODO: This could be a generic combine with a predicate for extracting the
4466 // high half of an integer being free.
4468 // (or i64:x, (zero_extend i32:y)) ->
4469 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
4470 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
4471 RHS.getOpcode() != ISD::ZERO_EXTEND)
4472 std::swap(LHS, RHS);
4474 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
4475 SDValue ExtSrc = RHS.getOperand(0);
4476 EVT SrcVT = ExtSrc.getValueType();
4477 if (SrcVT == MVT::i32) {
4479 SDValue LowLHS, HiBits;
4480 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
4481 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
4483 DCI.AddToWorklist(LowOr.getNode());
4484 DCI.AddToWorklist(HiBits.getNode());
4486 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4488 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
4492 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
4495 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
4502 SDValue SITargetLowering::performXorCombine(SDNode *N,
4503 DAGCombinerInfo &DCI) const {
4504 EVT VT = N->getValueType(0);
4508 SDValue LHS = N->getOperand(0);
4509 SDValue RHS = N->getOperand(1);
4511 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
4514 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
4521 // Instructions that will be lowered with a final instruction that zeros the
4522 // high result bits.
4523 // XXX - probably only need to list legal operations.
4524 static bool fp16SrcZerosHighBits(unsigned Opc) {
4533 case ISD::FCANONICALIZE:
4535 case ISD::UINT_TO_FP:
4536 case ISD::SINT_TO_FP:
4538 // Fabs is lowered to a bit operation, but it's an and which will clear the
4539 // high bits anyway.
4553 case ISD::FNEARBYINT:
4558 case AMDGPUISD::FRACT:
4559 case AMDGPUISD::CLAMP:
4560 case AMDGPUISD::COS_HW:
4561 case AMDGPUISD::SIN_HW:
4562 case AMDGPUISD::FMIN3:
4563 case AMDGPUISD::FMAX3:
4564 case AMDGPUISD::FMED3:
4565 case AMDGPUISD::FMAD_FTZ:
4566 case AMDGPUISD::RCP:
4567 case AMDGPUISD::RSQ:
4568 case AMDGPUISD::LDEXP:
4571 // fcopysign, select and others may be lowered to 32-bit bit operations
4572 // which don't zero the high bits.
4577 SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
4578 DAGCombinerInfo &DCI) const {
4579 if (!Subtarget->has16BitInsts() ||
4580 DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4583 EVT VT = N->getValueType(0);
4587 SDValue Src = N->getOperand(0);
4588 if (Src.getValueType() != MVT::i16)
4591 // (i32 zext (i16 (bitcast f16:$src))) -> fp16_zext $src
4592 // FIXME: It is not universally true that the high bits are zeroed on gfx9.
4593 if (Src.getOpcode() == ISD::BITCAST) {
4594 SDValue BCSrc = Src.getOperand(0);
4595 if (BCSrc.getValueType() == MVT::f16 &&
4596 fp16SrcZerosHighBits(BCSrc.getOpcode()))
4597 return DCI.DAG.getNode(AMDGPUISD::FP16_ZEXT, SDLoc(N), VT, BCSrc);
4603 SDValue SITargetLowering::performClassCombine(SDNode *N,
4604 DAGCombinerInfo &DCI) const {
4605 SelectionDAG &DAG = DCI.DAG;
4606 SDValue Mask = N->getOperand(1);
4608 // fp_class x, 0 -> false
4609 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
4610 if (CMask->isNullValue())
4611 return DAG.getConstant(0, SDLoc(N), MVT::i1);
4614 if (N->getOperand(0).isUndef())
4615 return DAG.getUNDEF(MVT::i1);
4620 static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
4621 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
4624 return DAG.isKnownNeverNaN(Op);
4627 static bool isCanonicalized(SDValue Op, const SISubtarget *ST,
4628 unsigned MaxDepth = 5) {
4629 // If the source is a result of another standard FP operation, it is already in canonical form.
4632 switch (Op.getOpcode()) {
4636 // These will flush denorms if required.
4646 case ISD::FCANONICALIZE:
4650 return Op.getValueType().getScalarType() != MVT::f16 ||
4651 ST->hasFP16Denormals();
4653 case ISD::FP_EXTEND:
4654 return Op.getOperand(0).getValueType().getScalarType() != MVT::f16 ||
4655 ST->hasFP16Denormals();
4657 case ISD::FP16_TO_FP:
4658 case ISD::FP_TO_FP16:
4659 return ST->hasFP16Denormals();
4661 // These can/will be lowered to or combined as bit operations, so we need to
4662 // check their inputs recursively.
4665 return (MaxDepth > 0) &&
4666 isCanonicalized(Op.getOperand(0), ST, MaxDepth - 1);
4671 return Op.getValueType().getScalarType() != MVT::f16;
4673 // In pre-GFX9 targets V_MIN_F32 and others do not flush denorms.
4674 // For such targets we need to check their inputs recursively.
4675 // TODO: on GFX9+ we could return true without checking provided no-nan
4676 // mode, since canonicalization is also used to quiet sNaNs.
4682 return (MaxDepth > 0) &&
4683 isCanonicalized(Op.getOperand(0), ST, MaxDepth - 1) &&
4684 isCanonicalized(Op.getOperand(1), ST, MaxDepth - 1);
4686 case ISD::ConstantFP: {
4687 auto F = cast<ConstantFPSDNode>(Op)->getValueAPF();
4688 return !F.isDenormal() && !(F.isNaN() && F.isSignaling());
4694 // Constant fold canonicalize.
4695 SDValue SITargetLowering::performFCanonicalizeCombine(
4697 DAGCombinerInfo &DCI) const {
4698 SelectionDAG &DAG = DCI.DAG;
4699 ConstantFPSDNode *CFP = isConstOrConstSplatFP(N->getOperand(0));
4702 SDValue N0 = N->getOperand(0);
4704 bool IsIEEEMode = Subtarget->enableIEEEBit(DAG.getMachineFunction());
4706 if ((IsIEEEMode || isKnownNeverSNan(DAG, N0)) &&
4707 isCanonicalized(N0, getSubtarget()))
4713 const APFloat &C = CFP->getValueAPF();
4715 // Flush denormals to 0 if not enabled.
4716 if (C.isDenormal()) {
4717 EVT VT = N->getValueType(0);
4718 EVT SVT = VT.getScalarType();
4719 if (SVT == MVT::f32 && !Subtarget->hasFP32Denormals())
4720 return DAG.getConstantFP(0.0, SDLoc(N), VT);
4722 if (SVT == MVT::f64 && !Subtarget->hasFP64Denormals())
4723 return DAG.getConstantFP(0.0, SDLoc(N), VT);
4725 if (SVT == MVT::f16 && !Subtarget->hasFP16Denormals())
4726 return DAG.getConstantFP(0.0, SDLoc(N), VT);
4730 EVT VT = N->getValueType(0);
4731 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
4732 if (C.isSignaling()) {
4733 // Quiet a signaling NaN.
4734 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
4737 // Make sure it is the canonical NaN bitpattern.
4739 // TODO: Can we use -1 as the canonical NaN value since it's an inline immediate?
4741 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
4742 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
4745 return N->getOperand(0);
4748 static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
4751 return AMDGPUISD::FMAX3;
4753 return AMDGPUISD::SMAX3;
4755 return AMDGPUISD::UMAX3;
4757 return AMDGPUISD::FMIN3;
4759 return AMDGPUISD::SMIN3;
4761 return AMDGPUISD::UMIN3;
4763 llvm_unreachable("Not a min/max opcode");
4767 SDValue SITargetLowering::performIntMed3ImmCombine(
4768 SelectionDAG &DAG, const SDLoc &SL,
4769 SDValue Op0, SDValue Op1, bool Signed) const {
4770 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
4774 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
4779 if (K0->getAPIntValue().sge(K1->getAPIntValue()))
4782 if (K0->getAPIntValue().uge(K1->getAPIntValue()))
4786 EVT VT = K0->getValueType(0);
4787 unsigned Med3Opc = Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3;
4788 if (VT == MVT::i32 || (VT == MVT::i16 && Subtarget->hasMed3_16())) {
4789 return DAG.getNode(Med3Opc, SL, VT,
4790 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
4793 // If there isn't a 16-bit med3 operation, convert to 32-bit.
4795 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
4797 SDValue Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
4798 SDValue Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
4799 SDValue Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
4801 SDValue Med3 = DAG.getNode(Med3Opc, SL, NVT, Tmp1, Tmp2, Tmp3);
4802 return DAG.getNode(ISD::TRUNCATE, SL, VT, Med3);
4805 SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
4808 SDValue Op1) const {
4809 ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
4813 ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1));
4817 // Ordered >= (although NaN inputs should have folded away by now).
4818 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
4819 if (Cmp == APFloat::cmpGreaterThan)
4822 // TODO: Check IEEE bit enabled?
4823 EVT VT = K0->getValueType(0);
4824 if (Subtarget->enableDX10Clamp()) {
4825 // If dx10_clamp is enabled, NaNs clamp to 0.0. This is the same as the
4826 // hardware fmed3 behavior converting to a min.
4827 // FIXME: Should this be allowing -0.0?
4828 if (K1->isExactlyValue(1.0) && K0->isExactlyValue(0.0))
4829 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Op0.getOperand(0));
4832 // med3 for f16 is only available on gfx9+.
4833 if (VT == MVT::f64 || (VT == MVT::f16 && !Subtarget->hasMed3_16()))
4836 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
4837 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then
4838 // give the other result, which is different from med3 with a NaN input.
4839 SDValue Var = Op0.getOperand(0);
4840 if (!isKnownNeverSNan(DAG, Var))
4843 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
4844 Var, SDValue(K0, 0), SDValue(K1, 0));
4847 SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
4848 DAGCombinerInfo &DCI) const {
4849 SelectionDAG &DAG = DCI.DAG;
4851 EVT VT = N->getValueType(0);
4852 unsigned Opc = N->getOpcode();
4853 SDValue Op0 = N->getOperand(0);
4854 SDValue Op1 = N->getOperand(1);
4856 // Only do this if the inner op has one use since this will just increase
4857 // register pressure for no benefit.
4860 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
4862 ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
4863 // max(max(a, b), c) -> max3(a, b, c)
4864 // min(min(a, b), c) -> min3(a, b, c)
4865 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
4867 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
4876 // max(a, max(b, c)) -> max3(a, b, c)
4877 // min(a, min(b, c)) -> min3(a, b, c)
4878 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
4880 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
4889 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
4890 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
4891 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
4895 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
4896 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
4900 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
4901 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
4902 (Opc == AMDGPUISD::FMIN_LEGACY &&
4903 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
4904 (VT == MVT::f32 || VT == MVT::f64 ||
4905 (VT == MVT::f16 && Subtarget->has16BitInsts())) &&
4907 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
4914 static bool isClampZeroToOne(SDValue A, SDValue B) {
4915 if (ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A)) {
4916 if (ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B)) {
4917 // FIXME: Should this be allowing -0.0?
4918 return (CA->isExactlyValue(0.0) && CB->isExactlyValue(1.0)) ||
4919 (CA->isExactlyValue(1.0) && CB->isExactlyValue(0.0));
4926 // FIXME: Should only worry about sNaNs for the version with a chain.
4927 SDValue SITargetLowering::performFMed3Combine(SDNode *N,
4928 DAGCombinerInfo &DCI) const {
4929 EVT VT = N->getValueType(0);
4930 // v_med3_f32 and v_max_f32 behave identically wrt denorms, exceptions and
4931 // NaNs. With a NaN input, the order of the operands may change the result.
4933 SelectionDAG &DAG = DCI.DAG;
4936 SDValue Src0 = N->getOperand(0);
4937 SDValue Src1 = N->getOperand(1);
4938 SDValue Src2 = N->getOperand(2);
4940 if (isClampZeroToOne(Src0, Src1)) {
4941 // const_a, const_b, x -> clamp is safe in all cases including signaling
4943 // FIXME: Should this be allowing -0.0?
4944 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src2);
4947 // FIXME: dx10_clamp behavior assumed in instcombine. Should we really bother
4948 // handling no dx10-clamp?
4949 if (Subtarget->enableDX10Clamp()) {
4950 // If NaNs are clamped to 0, we are free to reorder the inputs.
4952 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
4953 std::swap(Src0, Src1);
4955 if (isa<ConstantFPSDNode>(Src1) && !isa<ConstantFPSDNode>(Src2))
4956 std::swap(Src1, Src2);
4958 if (isa<ConstantFPSDNode>(Src0) && !isa<ConstantFPSDNode>(Src1))
4959 std::swap(Src0, Src1);
4961 if (isClampZeroToOne(Src1, Src2))
4962 return DAG.getNode(AMDGPUISD::CLAMP, SL, VT, Src0);
4968 SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
4969 DAGCombinerInfo &DCI) const {
4970 SDValue Src0 = N->getOperand(0);
4971 SDValue Src1 = N->getOperand(1);
4972 if (Src0.isUndef() && Src1.isUndef())
4973 return DCI.DAG.getUNDEF(N->getValueType(0));
4977 SDValue SITargetLowering::performExtractVectorEltCombine(
4978 SDNode *N, DAGCombinerInfo &DCI) const {
4979 SDValue Vec = N->getOperand(0);
4981 SelectionDAG &DAG = DCI.DAG;
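// (extract_element (fneg v), i) -> (fneg (extract_element v, i)), profitable
// when every use of the extract can absorb the fneg as a source modifier.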
4982 if (Vec.getOpcode() == ISD::FNEG && allUsesHaveSourceMods(N)) {
4984 EVT EltVT = N->getValueType(0);
4985 SDValue Idx = N->getOperand(1);
4986 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
4987 Vec.getOperand(0), Idx);
4988 return DAG.getNode(ISD::FNEG, SL, EltVT, Elt);
4995 unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
4997 const SDNode *N1) const {
4998 EVT VT = N0->getValueType(0);
5000 // Only do this if we are not trying to support denormals. v_mad_f32 does not
5001 // support denormals ever.
5002 if ((VT == MVT::f32 && !Subtarget->hasFP32Denormals()) ||
5003 (VT == MVT::f16 && !Subtarget->hasFP16Denormals()))
5006 const TargetOptions &Options = DAG.getTarget().Options;
5007 if ((Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath ||
5008 (N0->getFlags().hasUnsafeAlgebra() &&
5009 N1->getFlags().hasUnsafeAlgebra())) &&
5010 isFMAFasterThanFMulAndFAdd(VT)) {
5017 SDValue SITargetLowering::performAddCombine(SDNode *N,
5018 DAGCombinerInfo &DCI) const {
5019 SelectionDAG &DAG = DCI.DAG;
5020 EVT VT = N->getValueType(0);
5026 SDValue LHS = N->getOperand(0);
5027 SDValue RHS = N->getOperand(1);
5029 // add x, zext (setcc) => addcarry x, 0, setcc
5030 // add x, sext (setcc) => subcarry x, 0, setcc
5031 unsigned Opc = LHS.getOpcode();
5032 if (Opc == ISD::ZERO_EXTEND || Opc == ISD::SIGN_EXTEND ||
5033 Opc == ISD::ANY_EXTEND || Opc == ISD::ADDCARRY)
5034 std::swap(RHS, LHS);
5036 Opc = RHS.getOpcode();
5039 case ISD::ZERO_EXTEND:
5040 case ISD::SIGN_EXTEND:
5041 case ISD::ANY_EXTEND: {
5042 auto Cond = RHS.getOperand(0);
5043 if (!isBoolSGPR(Cond))
5045 SDVTList VTList = DAG.getVTList(MVT::i32, MVT::i1);
5046 SDValue Args[] = { LHS, DAG.getConstant(0, SL, MVT::i32), Cond };
5047 Opc = (Opc == ISD::SIGN_EXTEND) ? ISD::SUBCARRY : ISD::ADDCARRY;
5048 return DAG.getNode(Opc, SL, VTList, Args);
5050 case ISD::ADDCARRY: {
5051 // add x, (addcarry y, 0, cc) => addcarry x, y, cc
5052 auto C = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
5053 if (!C || C->getZExtValue() != 0) break;
5054 SDValue Args[] = { LHS, RHS.getOperand(0), RHS.getOperand(2) };
5055 return DAG.getNode(ISD::ADDCARRY, SDLoc(N), RHS->getVTList(), Args);
5061 SDValue SITargetLowering::performSubCombine(SDNode *N,
5062 DAGCombinerInfo &DCI) const {
5063 SelectionDAG &DAG = DCI.DAG;
5064 EVT VT = N->getValueType(0);
5070 SDValue LHS = N->getOperand(0);
5071 SDValue RHS = N->getOperand(1);
5073 unsigned Opc = LHS.getOpcode();
5074 if (Opc != ISD::SUBCARRY)
5075 std::swap(RHS, LHS);
5077 if (LHS.getOpcode() == ISD::SUBCARRY) {
5078 // sub (subcarry x, 0, cc), y => subcarry x, y, cc
5079 auto C = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
5080 if (!C || C->getZExtValue() != 0)
5082 SDValue Args[] = { LHS.getOperand(0), RHS, LHS.getOperand(2) };
5083 return DAG.getNode(ISD::SUBCARRY, SDLoc(N), LHS->getVTList(), Args);
5088 SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
5089 DAGCombinerInfo &DCI) const {
5091 if (N->getValueType(0) != MVT::i32)
5094 auto C = dyn_cast<ConstantSDNode>(N->getOperand(1));
5095 if (!C || C->getZExtValue() != 0)
5098 SelectionDAG &DAG = DCI.DAG;
5099 SDValue LHS = N->getOperand(0);
5101 // addcarry (add x, y), 0, cc => addcarry x, y, cc
5102 // subcarry (sub x, y), 0, cc => subcarry x, y, cc
5103 unsigned LHSOpc = LHS.getOpcode();
5104 unsigned Opc = N->getOpcode();
5105 if ((LHSOpc == ISD::ADD && Opc == ISD::ADDCARRY) ||
5106 (LHSOpc == ISD::SUB && Opc == ISD::SUBCARRY)) {
5107 SDValue Args[] = { LHS.getOperand(0), LHS.getOperand(1), N->getOperand(2) };
5108 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), Args);
5113 SDValue SITargetLowering::performFAddCombine(SDNode *N,
5114 DAGCombinerInfo &DCI) const {
5115 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5118 SelectionDAG &DAG = DCI.DAG;
5119 EVT VT = N->getValueType(0);
5122 SDValue LHS = N->getOperand(0);
5123 SDValue RHS = N->getOperand(1);
5125 // These should really be instruction patterns, but writing patterns with
5126 // source modifiers is a pain.
5128 // fadd (fadd (a, a), b) -> mad 2.0, a, b
5129 if (LHS.getOpcode() == ISD::FADD) {
5130 SDValue A = LHS.getOperand(0);
5131 if (A == LHS.getOperand(1)) {
5132 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
5134 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
5135 return DAG.getNode(FusedOp, SL, VT, A, Two, RHS);
5140 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
5141 if (RHS.getOpcode() == ISD::FADD) {
5142 SDValue A = RHS.getOperand(0);
5143 if (A == RHS.getOperand(1)) {
5144 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
5146 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
5147 return DAG.getNode(FusedOp, SL, VT, A, Two, LHS);
5155 SDValue SITargetLowering::performFSubCombine(SDNode *N,
5156 DAGCombinerInfo &DCI) const {
5157 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
5160 SelectionDAG &DAG = DCI.DAG;
5162 EVT VT = N->getValueType(0);
5163 assert(!VT.isVector());
5165 // Try to get the fneg to fold into the source modifier. This undoes generic
5166 // DAG combines and folds them into the mad.
5168 // Only do this if we are not trying to support denormals. v_mad_f32 does
5169 // not support denormals ever.
5170 SDValue LHS = N->getOperand(0);
5171 SDValue RHS = N->getOperand(1);
5172 if (LHS.getOpcode() == ISD::FADD) {
5173 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
5174 SDValue A = LHS.getOperand(0);
5175 if (A == LHS.getOperand(1)) {
5176 unsigned FusedOp = getFusedOpcode(DAG, N, LHS.getNode());
5178 const SDValue Two = DAG.getConstantFP(2.0, SL, VT);
5179 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
5181 return DAG.getNode(FusedOp, SL, VT, A, Two, NegRHS);
5186 if (RHS.getOpcode() == ISD::FADD) {
5187 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
5189 SDValue A = RHS.getOperand(0);
5190 if (A == RHS.getOperand(1)) {
5191 unsigned FusedOp = getFusedOpcode(DAG, N, RHS.getNode());
5193 const SDValue NegTwo = DAG.getConstantFP(-2.0, SL, VT);
5194 return DAG.getNode(FusedOp, SL, VT, A, NegTwo, LHS);
5202 SDValue SITargetLowering::performSetCCCombine(SDNode *N,
5203 DAGCombinerInfo &DCI) const {
5204 SelectionDAG &DAG = DCI.DAG;
5207 SDValue LHS = N->getOperand(0);
5208 SDValue RHS = N->getOperand(1);
5209 EVT VT = LHS.getValueType();
5210 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
5212 auto CRHS = dyn_cast<ConstantSDNode>(RHS);
5214 CRHS = dyn_cast<ConstantSDNode>(LHS);
5216 std::swap(LHS, RHS);
5217 CC = getSetCCSwappedOperands(CC);
5221 if (CRHS && VT == MVT::i32 && LHS.getOpcode() == ISD::SIGN_EXTEND &&
5222 isBoolSGPR(LHS.getOperand(0))) {
5223 // setcc (sext from i1 cc), -1, ne|sgt|ult) => not cc => xor cc, -1
5224 // setcc (sext from i1 cc), -1, eq|sle|uge) => cc
5225 // setcc (sext from i1 cc), 0, eq|sge|ule) => not cc => xor cc, -1
5226 // setcc (sext from i1 cc), 0, ne|ugt|slt) => cc
5227 if ((CRHS->isAllOnesValue() &&
5228 (CC == ISD::SETNE || CC == ISD::SETGT || CC == ISD::SETULT)) ||
5229 (CRHS->isNullValue() &&
5230 (CC == ISD::SETEQ || CC == ISD::SETGE || CC == ISD::SETULE)))
5231 return DAG.getNode(ISD::XOR, SL, MVT::i1, LHS.getOperand(0),
5232 DAG.getConstant(-1, SL, MVT::i1));
5233 if ((CRHS->isAllOnesValue() &&
5234 (CC == ISD::SETEQ || CC == ISD::SETLE || CC == ISD::SETUGE)) ||
5235 (CRHS->isNullValue() &&
5236 (CC == ISD::SETNE || CC == ISD::SETUGT || CC == ISD::SETLT)))
5237 return LHS.getOperand(0);
5240 if (VT != MVT::f32 && VT != MVT::f64 && (Subtarget->has16BitInsts() &&
5244 // Match isinf pattern
5245 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
5246 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
5247 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
5251 const APFloat &APF = CRHS->getValueAPF();
5252 if (APF.isInfinity() && !APF.isNegative()) {
5253 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
5254 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
5255 DAG.getConstant(Mask, SL, MVT::i32));
SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
                                                     DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;

  SDValue Src = N->getOperand(0);
  SDValue Srl = N->getOperand(0);
  if (Srl.getOpcode() == ISD::ZERO_EXTEND)
    Srl = Srl.getOperand(0);

  // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
  if (Srl.getOpcode() == ISD::SRL) {
    // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
    // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
    // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
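    //
    // In general the selected byte is shift / 8 + N, computed as SrcOffset
    // below; e.g. cvt_f32_ubyte1 (srl x, 16) gives SrcOffset == 16 + 8 * 1 ==
    // 24, i.e. cvt_f32_ubyte3 x.
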
    if (const ConstantSDNode *C =
        dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
                               EVT(MVT::i32));

      unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
      if (SrcOffset < 32 && SrcOffset % 8 == 0) {
        return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, SL,
                           MVT::f32, Srl);
      }
    }
  }
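
  // Only the byte read by this cvt_f32_ubyteN is demanded; try to shrink a
  // constant Src or simplify away the bits that feed the other bytes.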
  APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);

  KnownBits Known;
  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                        !DCI.isBeforeLegalizeOps());
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.ShrinkDemandedConstant(Src, Demanded, TLO) ||
      TLI.SimplifyDemandedBits(Src, Demanded, Known, TLO)) {
    DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default:
    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
  case ISD::ADD:
    return performAddCombine(N, DCI);
  case ISD::SUB:
    return performSubCombine(N, DCI);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    return performAddCarrySubCarryCombine(N, DCI);
  case ISD::FADD:
    return performFAddCombine(N, DCI);
  case ISD::FSUB:
    return performFSubCombine(N, DCI);
  case ISD::SETCC:
    return performSetCCCombine(N, DCI);
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
        getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return performMinMaxCombine(N, DCI);
    break;
  }
  case ISD::LOAD:
  case ISD::STORE:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case AMDGPUISD::ATOMIC_INC:
  case AMDGPUISD::ATOMIC_DEC: // TODO: Target mem intrinsics.
    if (DCI.isBeforeLegalize())
      break;
    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
  case ISD::AND:
    return performAndCombine(N, DCI);
  case ISD::OR:
    return performOrCombine(N, DCI);
  case ISD::XOR:
    return performXorCombine(N, DCI);
  case ISD::ZERO_EXTEND:
    return performZeroExtendCombine(N, DCI);
  case AMDGPUISD::FP_CLASS:
    return performClassCombine(N, DCI);
  case ISD::FCANONICALIZE:
    return performFCanonicalizeCombine(N, DCI);
  case AMDGPUISD::FRACT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP:
  case AMDGPUISD::LDEXP: {
    SDValue Src = N->getOperand(0);
    if (Src.isUndef())
      return Src;
    break;
  }
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return performUCharToFloatCombine(N, DCI);
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return performCvtF32UByteNCombine(N, DCI);
  case AMDGPUISD::FMED3:
    return performFMed3Combine(N, DCI);
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
    return performCvtPkRTZCombine(N, DCI);
  case ISD::SCALAR_TO_VECTOR: {
    SelectionDAG &DAG = DCI.DAG;
    EVT VT = N->getValueType(0);

    // v2i16 (scalar_to_vector i16:x) -> v2i16 (bitcast (any_extend i16:x))
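    //
    // The any_extend leaves the high 16 bits of the i32 undefined, which
    // matches scalar_to_vector's undef semantics for the unwritten lane.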
    if (VT == MVT::v2i16 || VT == MVT::v2f16) {
      SDLoc SL(N);
      SDValue Src = N->getOperand(0);
      EVT EltVT = Src.getValueType();
      if (EltVT == MVT::f16)
        Src = DAG.getNode(ISD::BITCAST, SL, MVT::i16, Src);

      SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SL, MVT::i32, Src);
      return DAG.getNode(ISD::BITCAST, SL, VT, Ext);
    }

    break;
  }
  case ISD::EXTRACT_VECTOR_ELT:
    return performExtractVectorEltCombine(N, DCI);
  }
  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

/// \brief Helper function for adjustWritemask
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                       SelectionDAG &DAG) const {
  SDNode *Users[4] = { };
  unsigned Lane = 0;
  unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
  unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
  unsigned NewDmask = 0;

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Don't look at users of the chain.
    if (I.getUse().getResNo() != 0)
      continue;

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return;

    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));
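
    // For example, with OldDmask == 0b1010 only components Y and W are live:
    // Lane 0 (sub0) holds Y (Comp == 1) and Lane 1 (sub1) holds W (Comp == 3).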

    // Set which texture component corresponds to the lane.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      assert(Dmask);
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return;

  // Adjust the writemask in the node
  std::vector<SDValue> Ops;
  Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
  Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
  Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
  Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);

  // If we only got one lane, replace it with a copy
  // (if NewDmask has only one bit set...)
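  // NewDmask & (NewDmask - 1) clears the lowest set bit, so the test below
  // is zero exactly when NewDmask is a power of two, i.e. a single lane.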
  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
    SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
                                       MVT::i32);
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                      SDLoc(), Users[Lane]->getValueType(0),
                                      SDValue(Node, 0), RC);
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }
}

static bool isFrameIndexOp(SDValue Op) {
  if (Op.getOpcode() == ISD::AssertZext)
    Op = Op.getOperand(0);

  return isa<FrameIndexSDNode>(Op);
}

/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
/// with frame index operands.
/// LLVM assumes that inputs to these instructions are registers.
SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
                                                        SelectionDAG &DAG) const {
  if (Node->getOpcode() == ISD::CopyToReg) {
    RegisterSDNode *DestReg = cast<RegisterSDNode>(Node->getOperand(1));
    SDValue SrcVal = Node->getOperand(2);

    // Insert a copy to a VReg_1 virtual register so LowerI1Copies doesn't have
    // to try understanding copies to physical registers.
    if (SrcVal.getValueType() == MVT::i1 &&
        TargetRegisterInfo::isPhysicalRegister(DestReg->getReg())) {
      SDLoc SL(Node);
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      SDValue VReg = DAG.getRegister(
        MRI.createVirtualRegister(&AMDGPU::VReg_1RegClass), MVT::i1);

      SDNode *Glued = Node->getGluedNode();
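      // If the node is glued, thread the glue result into the new CopyToReg;
      // getCopyToReg simply drops a null glue operand.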
      SDValue ToVReg
        = DAG.getCopyToReg(Node->getOperand(0), SL, VReg, SrcVal,
                         SDValue(Glued, Glued ? Glued->getNumValues() - 1 : 0));
      SDValue ToResultReg
        = DAG.getCopyToReg(ToVReg, SL, SDValue(DestReg, 0),
                           VReg, ToVReg.getValue(1));
      DAG.ReplaceAllUsesWith(Node, ToResultReg.getNode());
      DAG.RemoveDeadNode(Node);
      return ToResultReg.getNode();
    }
  }

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
    if (!isFrameIndexOp(Node->getOperand(i))) {
      Ops.push_back(Node->getOperand(i));
      continue;
    }

    SDLoc DL(Node);
    Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
                                     Node->getOperand(i).getValueType(),
                                     Node->getOperand(i)), 0));
  }

  DAG.UpdateNodeOperands(Node, Ops);
  return Node;
}

/// \brief Fold the instructions after selecting them.
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
  unsigned Opcode = Node->getMachineOpcode();

  if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
      !TII->isGather4(Opcode))
    adjustWritemask(Node, DAG);

  if (Opcode == AMDGPU::INSERT_SUBREG ||
      Opcode == AMDGPU::REG_SEQUENCE) {
    legalizeTargetIndependentNode(Node, DAG);
    return Node;
  }

  return Node;
}

/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  if (TII->isVOP3(MI.getOpcode())) {
    // Make sure constant bus requirements are respected.
    TII->legalizeOperandsVOP3(MRI, MI);
    return;
  }

  if (TII->isMIMG(MI)) {
    unsigned VReg = MI.getOperand(0).getReg();
    const TargetRegisterClass *RC = MRI.getRegClass(VReg);
    // TODO: Need mapping tables to handle other cases (register classes).
    if (RC != &AMDGPU::VReg_128RegClass)
      return;

    unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4;
    unsigned Writemask = MI.getOperand(DmaskIdx).getImm();
    unsigned BitsSet = 0;
    for (unsigned i = 0; i < 4; ++i)
      BitsSet += Writemask & (1 << i) ? 1 : 0;
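    // Map the number of enabled components to the narrowest register class
    // that can hold them, e.g. a writemask of 0b0110 has two bits set and
    // shrinks the 128-bit destination to a VReg_64.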
    switch (BitsSet) {
    default: return;
    case 1: RC = &AMDGPU::VGPR_32RegClass; break;
    case 2: RC = &AMDGPU::VReg_64RegClass; break;
    case 3: RC = &AMDGPU::VReg_96RegClass; break;
    }

    unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet);
    MI.setDesc(TII->get(NewOpcode));
    MRI.setRegClass(VReg, RC);
    return;
  }

  // Replace unused atomics with the no return version.
  int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
  if (NoRetAtomicOp != -1) {
    if (!Node->hasAnyUseOfValue(0)) {
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);
      return;
    }

    // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
    // instruction, because the return type of these instructions is a vec2 of
    // the memory type, so it can be tied to the input operand.
    // This means these instructions always have a use, so we need to add a
    // special case to check if the atomic has only one extract_subreg use,
    // which itself has no uses.
    if ((Node->hasNUsesOfValue(1, 0) &&
         Node->use_begin()->isMachineOpcode() &&
         Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
         !Node->use_begin()->hasAnyUseOfValue(0))) {
      unsigned Def = MI.getOperand(0).getReg();

      // Change this into a noret atomic.
      MI.setDesc(TII->get(NoRetAtomicOp));
      MI.RemoveOperand(0);

      // If we only remove the def operand from the atomic instruction, the
      // extract_subreg will be left with a use of a vreg without a def.
      // So we need to insert an implicit_def to avoid machine verifier
      // errors.
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::IMPLICIT_DEF), Def);
    }
    return;
  }
}

static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
                              uint32_t Val) {
  SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
  return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
}

MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue Ptr) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
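  //
  // The finished descriptor is { Ptr[31:0], Ptr[63:32], 0,
  // getDefaultRsrcDataFormat()[63:32] }: the pointer fills dwords 0-1 and the
  // constant pair built here fills dwords 2-3.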
  const SDValue Ops0[] = {
    DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
    buildSMovImm32(DAG, DL, 0),
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
  };

  SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                                MVT::v2i32, Ops0), 0);

  // Combine the constants and the pointer.
  const SDValue Ops1[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    Ptr,
    DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
    SubRegHi,
    DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled
///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
///        of the resource descriptor) to create an offset, which is added to
///        the resource pointer.
MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
                                           SDValue Ptr, uint32_t RsrcDword1,
                                           uint64_t RsrcDword2And3) const {
  SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
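
  // Fold the RsrcDword1 bits (e.g. the stride, which lives in descriptor
  // dword 1 per the comment above) into the high half of the pointer when
  // they are nonzero.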
  if (RsrcDword1) {
    PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                             DAG.getConstant(RsrcDword1, DL, MVT::i32)),
                    0);
  }

  SDValue DataLo = buildSMovImm32(DAG, DL,
                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);

  const SDValue Ops[] = {
    DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
    PtrLo,
    DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
    PtrHi,
    DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
    DataLo,
    DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
    DataHi,
    DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
  };

  return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
}

//===----------------------------------------------------------------------===//
// SI Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                               StringRef Constraint,
                                               MVT VT) const {
  if (!isTypeLegal(VT))
    return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 's':
    case 'r':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::SReg_32_XM0RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::SReg_512RegClass);
      }

    case 'v':
      switch (VT.getSizeInBits()) {
      default:
        return std::make_pair(0U, nullptr);
      case 32:
      case 16:
        return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
      case 64:
        return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
      case 96:
        return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
      case 128:
        return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
      case 256:
        return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
      case 512:
        return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
      }
    }
  }
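
  // Constraints longer than a single letter are expected to name a specific
  // register, e.g. "{v5}" for VGPR 5 or "{s2}" for SGPR 2.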
  if (Constraint.size() > 1) {
    const TargetRegisterClass *RC = nullptr;
    if (Constraint[1] == 'v') {
      RC = &AMDGPU::VGPR_32RegClass;
    } else if (Constraint[1] == 's') {
      RC = &AMDGPU::SGPR_32RegClass;
    }

    if (RC) {
      uint32_t Idx;
      bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
      if (!Failed && Idx < RC->getNumRegs())
        return std::make_pair(RC->getRegister(Idx), RC);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 's':
    case 'v':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}