//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

namespace llvm {

class R600InstrInfo;

} // end namespace llvm
//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//
namespace {

/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget *Subtarget;

public:
  explicit AMDGPUDAGToDAGISel(TargetMachine &TM, CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(TM, OptLevel) {}
  ~AMDGPUDAGToDAGISel() override = default;
  bool runOnMachineFunction(MachineFunction &MF) override;
  void Select(SDNode *N) override;
  StringRef getPassName() const override;
  void PostprocessISelDAG() override;
private:
  SDValue foldFrameIndex(SDValue N) const;
  bool isInlineImmediate(const SDNode *N) const;
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  bool isConstantLoad(const MemSDNode *N, int cbID) const;
  bool isUniformBr(const SDNode *N) const;

  SDNode *glueCopyToM0(SDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &SLC) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;
  bool SelectMUBUFConstant(SDValue Constant,
                           SDValue &SOffset,
                           SDValue &ImmOffset) const;
  bool SelectMUBUFIntrinsicOffset(SDValue Offset, SDValue &SOffset,
                                  SDValue &ImmOffset) const;
  bool SelectMUBUFIntrinsicVOffset(SDValue Offset, SDValue &SOffset,
                                   SDValue &ImmOffset, SDValue &VOffset) const;

  bool SelectFlat(SDValue Addr, SDValue &VAddr,
                  SDValue &SLC, SDValue &TFE) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferSgpr(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
                                 SDValue &Clamp,
                                 SDValue &Omod) const;
  void SelectADD_SUB_I64(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);
  void SelectFMA_W_CHAIN(SDNode *N);
  void SelectFMUL_W_CHAIN(SDNode *N);
  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  bool isCBranchSCC(const SDNode *N) const;
  void SelectBRCOND(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};

} // end anonymous namespace
/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM,
                                        CodeGenOpt::Level OptLevel) {
  return new AMDGPUDAGToDAGISel(TM, OptLevel);
}
bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &MF.getSubtarget<AMDGPUSubtarget>();
  return SelectionDAGISel::runOnMachineFunction(MF);
}
bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N) const {
  const SIInstrInfo *TII
    = static_cast<const SISubtarget *>(Subtarget)->getInstrInfo();

  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
    return TII->isInlineConstant(C->getAPIntValue());

  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
    return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());

  return false;
}
/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                                  unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    if (N->getOpcode() == ISD::CopyToReg) {
      unsigned Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      const SIRegisterInfo *TRI
        = static_cast<const SISubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                               SubRegIdx);
  }
  }
}
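// Note: on SI+ the m0 register holds the limit that the hardware uses to
// clamp local-memory (LDS) addressing, so writing the all-ones value below
// effectively disables the clamp before each LDS access.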
SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N) const {
  if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
      cast<MemSDNode>(N)->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
    return N;

  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  // Write max value to m0 before each load operation

  SDValue M0 = Lowering.copyToM0(*CurDAG, CurDAG->getEntryNode(), SDLoc(N),
                                 CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));

  SDValue Glue = M0.getValue(1);

  SmallVector <SDValue, 8> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    Ops.push_back(N->getOperand(i));
  }
  Ops.push_back(Glue);
  CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);

  return N;
}
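// SGPR vectors are built out of 32-bit elements, so an N-element vector
// occupies an N x 32-bit scalar register class (e.g. 4 elements map to
// SReg_128).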
static unsigned selectSGPRVectorRegClassID(unsigned NumVectorElts) {
  switch (NumVectorElts) {
  case 1:
    return AMDGPU::SReg_32_XM0RegClassID;
  case 2:
    return AMDGPU::SReg_64RegClassID;
  case 4:
    return AMDGPU::SReg_128RegClassID;
  case 8:
    return AMDGPU::SReg_256RegClassID;
  case 16:
    return AMDGPU::SReg_512RegClassID;
  }

  llvm_unreachable("invalid vector size");
}
void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return;   // Already selected.
  }

  if (isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC))
    N = glueCopyToM0(N);

  switch (Opc) {
  default:
    break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD:
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUB:
  case ISD::SUBC:
  case ISD::SUBE: {
    if (N->getValueType(0) != MVT::i64 ||
        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case AMDGPUISD::FMUL_W_CHAIN: {
    SelectFMUL_W_CHAIN(N);
    return;
  }
  case AMDGPUISD::FMA_W_CHAIN: {
    SelectFMA_W_CHAIN(N);
    return;
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      RegClassID = selectSGPRVectorRegClassID(NumVectorElts);
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit reg copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDLoc DL(N);
    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

    if (NumVectorElts == 1) {
      CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
                           RegClass);
      return;
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL,
                                    MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     DL, EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL, MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
    return;
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    SDLoc DL(N);
    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }
  case ISD::Constant:
  case ISD::ConstantFP: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                                CurDAG->getConstant(Imm & 0xFFFFFFFF, DL,
                                                    MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                                CurDAG->getConstant(Imm >> 32, DL, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
    };

    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32 ||
        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;

  case AMDGPUISD::ATOMIC_CMP_SWAP:
    SelectATOMIC_CMP_SWAP(N);
    return;
  }

  SelectCode(N);
}
bool AMDGPUDAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
  if (!N->readMem())
    return false;
  if (CbId == -1)
    return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;

  return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
}
bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const Instruction *Term = BB->getTerminator();
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
}
StringRef AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue& IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
                                       true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
    SDValue& BaseReg, SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
    return true;
  }
  return false;
}
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
  return true;
}
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}
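// There is no single scalar instruction for a 64-bit add/sub, so the value is
// split into dwords: S_ADD_U32 on the low half produces a carry in SCC, which
// S_ADDC_U32 consumes for the high half, and the two results are recombined
// with a REG_SEQUENCE.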
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd =
      (Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  CurDAG->ReplaceAllUsesWith(N, RegSequence);
  CurDAG->RemoveDeadNode(N);
}
void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp, omod
  SDValue Ops[10];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);

  CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32, N->getVTList(), Ops);
}
void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  // src0_modifiers, src0, src1_modifiers, src1, clamp, omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);

  CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
}
// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2) };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}
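// DS (local memory) instructions encode an unsigned offset of either 16 bits
// (single-address forms) or 8 bits (each of the two offsets of read2/write2),
// so a candidate offset must fit the relevant encoding before it can be
// folded.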
bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}
bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isUInt<16>(ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(AMDGPU::V_SUB_I32_e32, DL, MVT::i32,
                                     Zero, Addr.getOperand(1));

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.

    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}
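// The read2/write2 forms address two dwords with two 8-bit offsets scaled by
// the 4-byte element size; e.g. a byte offset of 40 becomes offset0 = 10 and
// offset1 = 11 for the adjacent dword.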
// TODO: If offset is too big, put low 16-bit into offset.
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    // (add n0, c0)
    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned DWordOffset0 = C->getZExtValue() / 4;
      unsigned DWordOffset1 = DWordOffset0 + 1;

      if (isUInt<8>(DWordOffset0)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(AMDGPU::V_SUB_I32_e32, DL, MVT::i32,
                                     Zero, Addr.getOperand(1));

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero
        = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  }

  // default case

  // FIXME: This is broken on SI where we still need to check if the base
  // pointer is positive here.
  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}
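// MUBUF instructions encode a 12-bit unsigned immediate byte offset
// (0..4095); anything larger has to be materialized in soffset.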
static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}
bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {
  // Subtarget prefers to use flat instructions
  if (Subtarget->useFlatForGlobal())
    return false;

  SDLoc DL(Addr);

  GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (N0.getOpcode() == ISD::ADD) {
      // (add (add N2, N3), C1) -> addr64
      SDValue N2 = N0.getOperand(0);
      SDValue N3 = N0.getOperand(1);
      Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
      Ptr = N2;
      VAddr = N3;
    } else {
      // (add N0, C1) -> offset
      VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
      Ptr = N0;
    }

    if (isLegalMUBUFImmOffset(C1)) {
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }

    if (isUInt<32>(C1->getZExtValue())) {
      // Illegal offset, store it in soffset.
      Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
      SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
                        0);
      return true;
    }
  }

  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1) -> addr64
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);

    Ptr = N0;
    VAddr = N1;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  // default case -> offset
  VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
  Ptr = Addr;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);

  return true;
}
bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset, SDValue &GLC,
                                           SDValue &SLC, SDValue &TFE) const {
  SDValue Ptr, Offen, Idxen, Addr64;

  // The addr64 bit was removed for Volcanic Islands.
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return false;

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
                   GLC, SLC, TFE))
    return false;

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset,
                                           SDValue &SLC) const {
  SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
  SDValue GLC, TFE;

  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE);
}
SDValue AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
  if (auto FI = dyn_cast<FrameIndexSDNode>(N))
    return CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
  return N;
}
bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {
  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
  SOffset = CurDAG->getRegister(Info->getScratchWaveOffsetReg(), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    // Offsets in vaddr must be positive.
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = foldFrameIndex(N0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // (node)
  VAddr = foldFrameIndex(Addr);
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}
bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
                   GLC, SLC, TFE))
    return false;

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset, SDValue &Offset
                                           ) const {
  SDValue GLC, SLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset, SDValue &Offset,
                                           SDValue &SLC) const {
  SDValue GLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}
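// Split a constant buffer offset between the 12-bit immediate field and
// soffset so that the two add back to the original value. Worked examples of
// the logic below: Imm = 4100 is within 4095 + 64, so it becomes
// ImmOffset = 4095 with SOffset = 5 (an inline constant); Imm = 5000 becomes
// ImmOffset = 905 with SOffset = 4095 (all low bits set, to make the soffset
// register reusable by adjacent loads).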
bool AMDGPUDAGToDAGISel::SelectMUBUFConstant(SDValue Constant,
                                             SDValue &SOffset,
                                             SDValue &ImmOffset) const {
  SDLoc DL(Constant);
  uint32_t Imm = cast<ConstantSDNode>(Constant)->getZExtValue();
  uint32_t Overflow = 0;

  if (Imm >= 4096) {
    if (Imm <= 4095 + 64) {
      // Use an SOffset inline constant for 1..64
      Overflow = Imm - 4095;
      Imm = 4095;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low-bits set into SOffset, so that a larger
      // range of values can be covered using s_movk_i32
      uint32_t High = (Imm + 1) & ~4095;
      uint32_t Low = (Imm + 1) & 4095;
      Imm = Low;
      Overflow = High - 1;
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = CurDAG->getTargetConstant(Imm, DL, MVT::i16);

  if (Overflow <= 64)
    SOffset = CurDAG->getTargetConstant(Overflow, DL, MVT::i32);
  else
    SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                      CurDAG->getTargetConstant(Overflow, DL, MVT::i32)),
                      0);

  return true;
}
bool AMDGPUDAGToDAGISel::SelectMUBUFIntrinsicOffset(SDValue Offset,
                                                    SDValue &SOffset,
                                                    SDValue &ImmOffset) const {
  if (!isa<ConstantSDNode>(Offset))
    return false;

  return SelectMUBUFConstant(Offset, SOffset, ImmOffset);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFIntrinsicVOffset(SDValue Offset,
                                                     SDValue &SOffset,
                                                     SDValue &ImmOffset,
                                                     SDValue &VOffset) const {
  SDLoc DL(Offset);

  // Don't generate an unnecessary voffset for constant offsets.
  if (isa<ConstantSDNode>(Offset)) {
    SDValue Tmp1, Tmp2;

    // When necessary, use a voffset in <= CI anyway to work around a hardware
    // bug.
    if (Subtarget->getGeneration() > AMDGPUSubtarget::SEA_ISLANDS ||
        SelectMUBUFConstant(Offset, Tmp1, Tmp2))
      return false;
  }

  if (CurDAG->isBaseWithConstantOffset(Offset)) {
    SDValue N0 = Offset.getOperand(0);
    SDValue N1 = Offset.getOperand(1);
    if (cast<ConstantSDNode>(N1)->getSExtValue() >= 0 &&
        SelectMUBUFConstant(N1, SOffset, ImmOffset)) {
      VOffset = N0;
      return true;
    }
  }

  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  VOffset = Offset;

  return true;
}
bool AMDGPUDAGToDAGISel::SelectFlat(SDValue Addr,
                                    SDValue &VAddr,
                                    SDValue &SLC,
                                    SDValue &TFE) const {
  VAddr = Addr;
  TFE = SLC = CurDAG->getTargetConstant(0, SDLoc(), MVT::i1);
  return true;
}
/// \param EncodedOffset This is the immediate value that will be encoded
///        directly into the instruction. On SI/CI the \p EncodedOffset
///        will be in units of dwords and on VI+ it will be units of bytes.
static bool isLegalSMRDImmOffset(const AMDGPUSubtarget *ST,
                                 int64_t EncodedOffset) {
  return ST->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ?
      isUInt<8>(EncodedOffset) : isUInt<20>(EncodedOffset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
                                          SDValue &Offset, bool &Imm) const {
  // FIXME: Handle non-constant offsets.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
  if (!C)
    return false;

  SDLoc SL(ByteOffsetNode);
  AMDGPUSubtarget::Generation Gen = Subtarget->getGeneration();
  int64_t ByteOffset = C->getSExtValue();
  int64_t EncodedOffset = Gen < AMDGPUSubtarget::VOLCANIC_ISLANDS ?
      ByteOffset >> 2 : ByteOffset;

  if (isLegalSMRDImmOffset(Subtarget, EncodedOffset)) {
    Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
    Imm = true;
    return true;
  }

  if (!isUInt<32>(EncodedOffset) || !isUInt<32>(ByteOffset))
    return false;

  if (Gen == AMDGPUSubtarget::SEA_ISLANDS && isUInt<32>(EncodedOffset)) {
    // 32-bit immediates are supported on Sea Islands.
    Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
  } else {
    SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
    Offset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32,
                                            C32Bit), 0);
  }
  Imm = false;
  return true;
}
bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
                                    SDValue &Offset, bool &Imm) const {
  SDLoc SL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    if (SelectSMRDOffset(N1, Offset, Imm)) {
      SBase = N0;
      return true;
    }
  }
  SBase = Addr;
  Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
  Imm = true;
  return true;
}
bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
                                       SDValue &Offset) const {
  bool Imm;
  return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
}

bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
                                         SDValue &Offset) const {
  if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  bool Imm;
  if (!SelectSMRD(Addr, SBase, Offset, Imm))
    return false;

  return !Imm && isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
                                        SDValue &Offset) const {
  bool Imm;
  return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
         !isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
                                             SDValue &Offset) const {
  bool Imm;
  return SelectSMRDOffset(Addr, Offset, Imm) && Imm;
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
                                               SDValue &Offset) const {
  if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  bool Imm;
  if (!SelectSMRDOffset(Addr, Offset, Imm))
    return false;

  return !Imm && isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferSgpr(SDValue Addr,
                                              SDValue &Offset) const {
  bool Imm;
  return SelectSMRDOffset(Addr, Offset, Imm) && !Imm &&
         !isa<ConstantSDNode>(Offset);
}
bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
                                            SDValue &Base,
                                            SDValue &Offset) const {
  SDLoc DL(Index);

  if (CurDAG->isBaseWithConstantOffset(Index)) {
    SDValue N0 = Index.getOperand(0);
    SDValue N1 = Index.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    // (add n0, c0)
    Base = N0;
    Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
    return true;
  }

  if (isa<ConstantSDNode>(Index))
    return false;

  Base = Index;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  return true;
}
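// For example, extracting 16 bits starting at bit 8 packs to
// 8 | (16 << 16) == 0x100008.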
SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
                                     SDValue Val, uint32_t Offset,
                                     uint32_t Width) {
  // Transformation function, pack the offset and width of a BFE into
  // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
  // source, bits [5:0] contain the offset and bits [22:16] the width.
  uint32_t PackedVal = Offset | (Width << 16);
  SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);

  return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
}
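// For example, "(x << 8) srl 24" extracts bits [23:16] of x and becomes
// S_BFE_U32 x, offset = 24 - 8 = 16, width = 32 - 24 = 8.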
void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
  // "((a << b) srl c)" ---> "BFE_U32 a, (c - b), (32 - c)"
  // "((a << b) sra c)" ---> "BFE_I32 a, (c - b), (32 - c)"
  // Predicate: 0 < b <= c < 32

  const SDValue &Shl = N->getOperand(0);
  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));

  if (B && C) {
    uint32_t BVal = B->getZExtValue();
    uint32_t CVal = C->getZExtValue();

    if (0 < BVal && BVal <= CVal && CVal < 32) {
      bool Signed = N->getOpcode() == ISD::SRA;
      unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

      ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
                              32 - CVal));
      return;
    }
  }
  SelectCode(N);
}
void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::AND:
    if (N->getOperand(0).getOpcode() == ISD::SRL) {
      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
      // Predicate: isMask(mask)
      const SDValue &Srl = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue();

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  Srl.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    }
    break;
  case ISD::SRL:
    if (N->getOperand(0).getOpcode() == ISD::AND) {
      // "((a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
      // Predicate: isMask(mask >> b)
      const SDValue &And = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  And.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;
  case ISD::SRA:
    if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;

  case ISD::SIGN_EXTEND_INREG: {
    // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
    SDValue Src = N->getOperand(0);
    if (Src.getOpcode() != ISD::SRL)
      break;

    const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
    if (!Amt)
      break;

    unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
    ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
                            Amt->getZExtValue(), Width));
    return;
  }
  }

  SelectCode(N);
}
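// A brcond can be selected onto S_CBRANCH_SCC* only when the condition is a
// scalar setcc feeding this branch alone; 64-bit compares additionally
// require an EQ/NE predicate and the subtarget's scalar 64-bit compare
// support.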
bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
  assert(N->getOpcode() == ISD::BRCOND);
  if (!N->hasOneUse())
    return false;

  SDValue Cond = N->getOperand(1);
  if (Cond.getOpcode() == ISD::CopyToReg)
    Cond = Cond.getOperand(2);

  if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
    return false;

  MVT VT = Cond.getOperand(0).getSimpleValueType();
  if (VT == MVT::i32)
    return true;

  if (VT == MVT::i64) {
    auto ST = static_cast<const SISubtarget *>(Subtarget);

    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
  }

  return false;
}
void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
  SDValue Cond = N->getOperand(1);

  if (Cond.isUndef()) {
    CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
                         N->getOperand(2), N->getOperand(0));
    return;
  }

  if (isCBranchSCC(N)) {
    // This brcond will use S_CBRANCH_SCC*, so let tablegen handle it.
    SelectCode(N);
    return;
  }

  SDLoc SL(N);

  SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, AMDGPU::VCC, Cond);
  CurDAG->SelectNodeTo(N, AMDGPU::S_CBRANCH_VCCNZ, MVT::Other,
                       N->getOperand(2), // Basic Block
                       VCC.getValue(0));
}
// This is here because there isn't a way to use the generated sub0_sub1 as the
// subreg index to EXTRACT_SUBREG in tablegen.
void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
  MemSDNode *Mem = cast<MemSDNode>(N);
  unsigned AS = Mem->getAddressSpace();
  if (AS == AMDGPUAS::FLAT_ADDRESS) {
    SelectCode(N);
    return;
  }

  MVT VT = N->getSimpleValueType(0);
  bool Is32 = (VT == MVT::i32);
  SDLoc SL(N);

  MachineSDNode *CmpSwap = nullptr;
  if (Subtarget->hasAddr64()) {
    SDValue SRsrc, VAddr, SOffset, Offset, SLC;

    if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_RTN_ADDR64 :
          AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_RTN_ADDR64;
      SDValue CmpVal = Mem->getOperand(2);

      // XXX - Do we care about glue operands?

      SDValue Ops[] = {
        CmpVal, VAddr, SRsrc, SOffset, Offset, SLC, Mem->getChain()
      };

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SDValue SRsrc, SOffset, Offset, SLC;
    if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_RTN_OFFSET :
          AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_RTN_OFFSET;

      SDValue CmpVal = Mem->getOperand(2);
      SDValue Ops[] = {
        CmpVal, SRsrc, SOffset, Offset, SLC, Mem->getChain()
      };

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SelectCode(N);
    return;
  }

  MachineSDNode::mmo_iterator MMOs = MF->allocateMemRefsArray(1);
  *MMOs = Mem->getMemOperand();
  CmpSwap->setMemRefs(MMOs, MMOs + 1);

  unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  SDValue Extract
    = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));

  ReplaceUses(SDValue(N, 0), Extract);
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
  CurDAG->RemoveDeadNode(N);
}
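// VOP3 source modifiers: fneg/fabs wrappers around a source are stripped and
// re-expressed as the NEG/ABS bits of the src_modifiers operand, so
// "fneg (fabs x)" selects to x with both bits set.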
bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {
  unsigned Mods = 0;

  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src,
                                          SDValue &SrcMods) const {
  bool Res = SelectVOP3Mods(In, Src, SrcMods);
  return Res && cast<ConstantSDNode>(SrcMods)->isNullValue();
}
bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  SDLoc DL(In);
  // FIXME: Handle Clamp and Omod
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i32);
  Omod = CurDAG->getTargetConstant(0, DL, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3NoMods0(SDValue In, SDValue &Src,
                                           SDValue &SrcMods, SDValue &Clamp,
                                           SDValue &Omod) const {
  bool Res = SelectVOP3Mods0(In, Src, SrcMods, Clamp, Omod);

  return Res && cast<ConstantSDNode>(SrcMods)->isNullValue() &&
                cast<ConstantSDNode>(Clamp)->isNullValue() &&
                cast<ConstantSDNode>(Omod)->isNullValue();
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
                                              SDValue &SrcMods,
                                              SDValue &Omod) const {
  // FIXME: Handle Omod
  Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
                                                   SDValue &SrcMods,
                                                   SDValue &Clamp,
                                                   SDValue &Omod) const {
  Clamp = Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
  return SelectVOP3Mods(In, Src, SrcMods);
}
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;

    // Go over all selected nodes and try to fold them a bit more
    for (SDNode &Node : CurDAG->allnodes()) {
      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(&Node);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != &Node) {
        ReplaceUses(&Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}