//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DiagnosticInfo.h"

using namespace llvm;

namespace llvm {
class R600InstrInfo;
} // end namespace llvm

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {

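// Heuristic used by SelectBRCOND below: a BRCOND can be lowered to
// S_CBRANCH_SCC* when its condition is a single-use i32 SETCC, possibly
// routed through a CopyToReg.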
static bool isCBranchSCC(const SDNode *N) {
  assert(N->getOpcode() == ISD::BRCOND);
  if (!N->hasOneUse())
    return false;

  SDValue Cond = N->getOperand(1);
  if (Cond.getOpcode() == ISD::CopyToReg)
    Cond = Cond.getOperand(2);
  return Cond.getOpcode() == ISD::SETCC &&
         Cond.getOperand(0).getValueType() == MVT::i32 && Cond.hasOneUse();
}

/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget *Subtarget;

public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();
  bool runOnMachineFunction(MachineFunction &MF) override;
  void Select(SDNode *N) override;
  const char *getPassName() const override;
  void PreprocessISelDAG() override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  bool isConstantLoad(const MemSDNode *N, int CbId) const;
  bool isUniformBr(const SDNode *N) const;

  SDNode *glueCopyToM0(SDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &SLC) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;
  bool SelectMUBUFConstant(SDValue Constant,
                           SDValue &SOffset,
                           SDValue &ImmOffset) const;
  bool SelectMUBUFIntrinsicOffset(SDValue Offset, SDValue &SOffset,
                                  SDValue &ImmOffset) const;
  bool SelectMUBUFIntrinsicVOffset(SDValue Offset, SDValue &SOffset,
                                   SDValue &ImmOffset, SDValue &VOffset) const;

  bool SelectFlat(SDValue Addr, SDValue &VAddr,
                  SDValue &SLC, SDValue &TFE) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferSgpr(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
                                 SDValue &Clamp,
                                 SDValue &Omod) const;

  void SelectADD_SUB_I64(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  void SelectBRCOND(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
    : SelectionDAGISel(TM) {}

bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
  Subtarget = &MF.getSubtarget<AMDGPUSubtarget>();
  return SelectionDAGISel::runOnMachineFunction(MF);
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo, or nullptr if the register class cannot
/// be determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                               SubRegIdx);
  }
  }
}

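// On SI and later, loads and stores to the local address space read m0 as a
// bound on the access; the helper below initializes m0 to the maximum value
// (-1) and glues that CopyToReg onto the memory node's operand list.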
SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N) const {
  if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
      cast<MemSDNode>(N)->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
    return N;

  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  // Write max value to m0 before each load operation.
  SDValue M0 = Lowering.copyToM0(*CurDAG, CurDAG->getEntryNode(), SDLoc(N),
                                 CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));

  SDValue Glue = M0.getValue(1);

  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    Ops.push_back(N->getOperand(i));
  }
  Ops.push_back(Glue);
  CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);

  return N;
}

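// Map a vector's element count to the narrowest SGPR register class that can
// hold it (one 32-bit register per element).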
static unsigned selectSGPRVectorRegClassID(unsigned NumVectorElts) {
  switch (NumVectorElts) {
  case 1:
    return AMDGPU::SReg_32RegClassID;
  case 2:
    return AMDGPU::SReg_64RegClassID;
  case 4:
    return AMDGPU::SReg_128RegClassID;
  case 8:
    return AMDGPU::SReg_256RegClassID;
  case 16:
    return AMDGPU::SReg_512RegClassID;
  }

  llvm_unreachable("invalid vector size");
}

void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return; // Already selected.
  }

  if (isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC))
    N = glueCopyToM0(N);

  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom-lowering it during
  // DAG legalization so that we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD:
  case ISD::SUB: {
    if (N->getValueType(0) != MVT::i64 ||
        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = Subtarget->getRegisterInfo();
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      RegClassID = selectSGPRVectorRegClassID(NumVectorElts);
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDLoc DL(N);
    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

    if (NumVectorElts == 1) {
      CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
                           RegClass);
      return;
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL,
                                    MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     DL, EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), DL, MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
    return;
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (Subtarget->getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                                CurDAG->getConstant(Imm & 0xFFFFFFFF, DL,
                                                    MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                                CurDAG->getConstant(Imm >> 32, DL, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
    };

    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }
  case ISD::LOAD:
  case ISD::STORE: {
    // Loads and stores to the local address space also need the m0 glue
    // added by glueCopyToM0 (see above).
    N = glueCopyToM0(N);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version
    // which has separate operands for the offset and width, the scalar
    // version packs the width and offset into a single operand. Try to move
    // to the scalar version if the offsets are constant, so that we can try
    // to keep extended loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32 ||
        Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;

  case AMDGPUISD::ATOMIC_CMP_SWAP:
    SelectATOMIC_CMP_SWAP(N);
    return;
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
  if (!N->readMem())
    return false;
  if (CbId == -1)
    return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;

  return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
}

bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const Instruction *Term = BB->getTerminator();
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
                                       true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

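// There is no 64-bit scalar add instruction, so the helper below expands an
// i64 ADD/SUB into 32-bit halves linked through the carry bit, roughly:
//
//   lo, carry = S_ADD_U32  LHS.sub0, RHS.sub0
//   hi        = S_ADDC_U32 LHS.sub1, RHS.sub1, carry
//   result    = REG_SEQUENCE lo, sub0, hi, sub1
//
// (S_SUB_U32 / S_SUBB_U32 for subtraction). The carry travels on a glue edge.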
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
    = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                             SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
  // omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
  CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}

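// Checks whether a byte offset fits the unsigned offset field of a DS
// instruction (16 bits for the single-offset forms, 8 bits for the
// read2/write2 forms) and, on Southern Islands, that the base address is
// known to be non-negative.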
bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}

bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isUInt<16>(ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset, 16)) {
          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(AMDGPU::V_SUB_I32_e32, DL, MVT::i32,
                                     Zero, Addr.getOperand(1));

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.
    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // Default case.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}

// TODO: If offset is too big, put low 16-bit into offset.
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;

    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned DWordOffset0 = C->getZExtValue() / 4;
      unsigned DWordOffset1 = DWordOffset0 + 1;

      if (isUInt<8>(DWordOffset0)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, DWordOffset1, 8)) {
          MachineSDNode *MachineSub
            = CurDAG->getMachineNode(AMDGPU::V_SUB_I32_e32, DL, MVT::i32,
                                     Zero, Addr.getOperand(1));

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero
        = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, DL, MVT::i8);
      return true;
    }
  }

  // Default case.
  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}

static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}

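// Decompose a MUBUF address into the pieces the instruction encoding needs:
// a base pointer for the resource descriptor, an optional VGPR address, an
// SGPR soffset, a 12-bit immediate offset, and the offen/idxen/addr64 and
// glc/slc/tfe flag operands.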
bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {
  // Subtarget prefers to use flat instructions.
  if (Subtarget->useFlatForGlobal())
    return false;

  SDLoc DL(Addr);

  if (!GLC.getNode())
    GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  if (!SLC.getNode())
    SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (N0.getOpcode() == ISD::ADD) {
      // (add (add N2, N3), C1) -> addr64
      SDValue N2 = N0.getOperand(0);
      SDValue N3 = N0.getOperand(1);
      Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
      Ptr = N2;
      VAddr = N3;
    } else {
      // (add N0, C1) -> offset
      VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
      Ptr = N0;
    }

    if (isLegalMUBUFImmOffset(C1)) {
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }

    if (isUInt<32>(C1->getZExtValue())) {
      // Illegal offset, store it in soffset.
      Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
      SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
                        0);
      return true;
    }
  }

  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1) -> addr64
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
    Ptr = N0;
    VAddr = N1;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
    return true;
  }

  // Default case -> offset.
  VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
  Ptr = Addr;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset, SDValue &GLC,
                                           SDValue &SLC, SDValue &TFE) const {
  SDValue Ptr, Offen, Idxen, Addr64;

  // The addr64 addressing mode was removed for Volcanic Islands.
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return false;

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
                   GLC, SLC, TFE))
    return false;

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset,
                                           SDValue &SLC) const {
  SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
  SDValue GLC, TFE;

  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC, TFE);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {
  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
  SOffset = CurDAG->getRegister(Info->getScratchWaveOffsetReg(), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    // Offsets in vaddr must be positive.
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = N0;
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // (node)
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
    static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());

  if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
                   GLC, SLC, TFE))
    return false;

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset,
                                           SDValue &Offset) const {
  SDValue GLC, SLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset, SDValue &Offset,
                                           SDValue &SLC) const {
  SDValue GLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}

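// Split a constant buffer offset into the 12-bit immediate offset field and,
// when it does not fit, an SOffset part. As a worked example of the logic
// below: a constant of 5000 gives High = 4096 and Low = 905, so it is emitted
// as ImmOffset = 905 with SOffset = 4095 (note 905 + 4095 == 5000, and 4095
// has all low bits set, which helps s_movk_i32 value reuse).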
bool AMDGPUDAGToDAGISel::SelectMUBUFConstant(SDValue Constant,
                                             SDValue &SOffset,
                                             SDValue &ImmOffset) const {
  SDLoc DL(Constant);
  uint32_t Imm = cast<ConstantSDNode>(Constant)->getZExtValue();
  uint32_t Overflow = 0;

  if (Imm >= 4096) {
    if (Imm <= 4095 + 64) {
      // Use an SOffset inline constant for 1..64.
      Overflow = Imm - 4095;
      Imm = 4095;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low bits set into SOffset, so that a larger
      // range of values can be covered using s_movk_i32.
      uint32_t High = (Imm + 1) & ~4095;
      uint32_t Low = (Imm + 1) & 4095;

      Imm = Low;
      Overflow = High - 1;
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = CurDAG->getTargetConstant(Imm, DL, MVT::i16);

  if (Overflow <= 64)
    SOffset = CurDAG->getTargetConstant(Overflow, DL, MVT::i32);
  else
    SOffset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                      CurDAG->getTargetConstant(Overflow, DL, MVT::i32)),
                      0);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFIntrinsicOffset(SDValue Offset,
                                                    SDValue &SOffset,
                                                    SDValue &ImmOffset) const {
  if (!isa<ConstantSDNode>(Offset))
    return false;

  return SelectMUBUFConstant(Offset, SOffset, ImmOffset);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFIntrinsicVOffset(SDValue Offset,
                                                     SDValue &SOffset,
                                                     SDValue &ImmOffset,
                                                     SDValue &VOffset) const {
  SDLoc DL(Offset);

  // Don't generate an unnecessary voffset for constant offsets.
  if (isa<ConstantSDNode>(Offset)) {
    SDValue Tmp1, Tmp2;

    // When necessary, use a voffset in <= CI anyway to work around a hardware
    // bug.
    if (Subtarget->getGeneration() > AMDGPUSubtarget::SEA_ISLANDS ||
        SelectMUBUFConstant(Offset, Tmp1, Tmp2))
      return false;
  }

  if (CurDAG->isBaseWithConstantOffset(Offset)) {
    SDValue N0 = Offset.getOperand(0);
    SDValue N1 = Offset.getOperand(1);
    if (cast<ConstantSDNode>(N1)->getSExtValue() >= 0 &&
        SelectMUBUFConstant(N1, SOffset, ImmOffset)) {
      VOffset = N0;
      return true;
    }
  }

  SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
  VOffset = Offset;

  return true;
}

bool AMDGPUDAGToDAGISel::SelectFlat(SDValue Addr,
                                    SDValue &VAddr,
                                    SDValue &SLC,
                                    SDValue &TFE) const {
  VAddr = Addr;
  TFE = SLC = CurDAG->getTargetConstant(0, SDLoc(), MVT::i1);
  return true;
}

/// \param EncodedOffset This is the immediate value that will be encoded
///        directly into the instruction. On SI/CI the \p EncodedOffset
///        will be in units of dwords; on VI+ it will be in units of bytes.
static bool isLegalSMRDImmOffset(const AMDGPUSubtarget *ST,
                                 int64_t EncodedOffset) {
  return ST->getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS ?
    isUInt<8>(EncodedOffset) : isUInt<20>(EncodedOffset);
}

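// As an example of the encoding described above: a byte offset of 16 is
// encoded as 4 (dwords) on SI/CI via the ByteOffset >> 2 computation in
// SelectSMRDOffset below, but stays 16 (bytes) on VI+.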
bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
                                          SDValue &Offset, bool &Imm) const {
  // FIXME: Handle non-constant offsets.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
  if (!C)
    return false;

  SDLoc SL(ByteOffsetNode);
  AMDGPUSubtarget::Generation Gen = Subtarget->getGeneration();
  int64_t ByteOffset = C->getSExtValue();
  int64_t EncodedOffset = Gen < AMDGPUSubtarget::VOLCANIC_ISLANDS ?
      ByteOffset >> 2 : ByteOffset;

  if (isLegalSMRDImmOffset(Subtarget, EncodedOffset)) {
    Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
    Imm = true;
    return true;
  }

  if (!isUInt<32>(EncodedOffset) || !isUInt<32>(ByteOffset))
    return false;

  if (Gen == AMDGPUSubtarget::SEA_ISLANDS && isUInt<32>(EncodedOffset)) {
    // 32-bit immediates are supported on Sea Islands.
    Offset = CurDAG->getTargetConstant(EncodedOffset, SL, MVT::i32);
  } else {
    SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
    Offset = SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32,
                                            C32Bit), 0);
  }
  Imm = false;
  return true;
}

bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
                                    SDValue &Offset, bool &Imm) const {
  SDLoc SL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);

    if (SelectSMRDOffset(N1, Offset, Imm)) {
      SBase = N0;
      return true;
    }
  }
  SBase = Addr;
  Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
  Imm = true;
  return true;
}

bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
                                       SDValue &Offset) const {
  bool Imm;
  return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
}

bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
                                         SDValue &Offset) const {
  if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  bool Imm;
  if (!SelectSMRD(Addr, SBase, Offset, Imm))
    return false;

  return !Imm && isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
                                        SDValue &Offset) const {
  bool Imm;
  return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
         !isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
                                             SDValue &Offset) const {
  bool Imm;
  return SelectSMRDOffset(Addr, Offset, Imm) && Imm;
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
                                               SDValue &Offset) const {
  if (Subtarget->getGeneration() != AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  bool Imm;
  if (!SelectSMRDOffset(Addr, Offset, Imm))
    return false;

  return !Imm && isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectSMRDBufferSgpr(SDValue Addr,
                                              SDValue &Offset) const {
  bool Imm;
  return SelectSMRDOffset(Addr, Offset, Imm) && !Imm &&
         !isa<ConstantSDNode>(Offset);
}

bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
                                            SDValue &Base,
                                            SDValue &Offset) const {
  SDLoc DL(Index);

  if (CurDAG->isBaseWithConstantOffset(Index)) {
    SDValue N0 = Index.getOperand(0);
    SDValue N1 = Index.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    // (add n0, c0)
    Base = N0;
    Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
    return true;
  }

  if (isa<ConstantSDNode>(Index))
    return false;

  Base = Index;
  Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  return true;
}

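// Example of the packing performed by getS_BFE below: an offset of 8 and a
// width of 5 become 8 | (5 << 16) == 0x50008.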
SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
                                     SDValue Val, uint32_t Offset,
                                     uint32_t Width) {
  // Transformation function: pack the offset and width of a BFE into
  // the format expected by S_BFE_I32 / S_BFE_U32. In the second
  // source, bits [5:0] contain the offset and bits [22:16] the width.
  uint32_t PackedVal = Offset | (Width << 16);
  SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);

  return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
}

void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
  // "((a << b) srl c)" ---> "BFE_U32 a, (c - b), (32 - c)"
  // "((a << b) sra c)" ---> "BFE_I32 a, (c - b), (32 - c)"
  // Predicate: 0 < b <= c < 32

  const SDValue &Shl = N->getOperand(0);
  ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));

  if (B && C) {
    uint32_t BVal = B->getZExtValue();
    uint32_t CVal = C->getZExtValue();

    if (0 < BVal && BVal <= CVal && CVal < 32) {
      bool Signed = N->getOpcode() == ISD::SRA;
      unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

      ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
                              32 - CVal));
      return;
    }
  }
  SelectCode(N);
}

void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::AND:
    if (N->getOperand(0).getOpcode() == ISD::SRL) {
      // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
      // Predicate: isMask(mask)
      const SDValue &Srl = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue();

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  Srl.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    }
    break;
  case ISD::SRL:
    if (N->getOperand(0).getOpcode() == ISD::AND) {
      // "((a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
      // Predicate: isMask(mask >> b)
      const SDValue &And = N->getOperand(0);
      ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
      ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));

      if (Shift && Mask) {
        uint32_t ShiftVal = Shift->getZExtValue();
        uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;

        if (isMask_32(MaskVal)) {
          uint32_t WidthVal = countPopulation(MaskVal);

          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
                                  And.getOperand(0), ShiftVal, WidthVal));
          return;
        }
      }
    } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;
  case ISD::SRA:
    if (N->getOperand(0).getOpcode() == ISD::SHL) {
      SelectS_BFEFromShifts(N);
      return;
    }
    break;

  case ISD::SIGN_EXTEND_INREG: {
    // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
    SDValue Src = N->getOperand(0);
    if (Src.getOpcode() != ISD::SRL)
      break;

    const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
    if (!Amt)
      break;

    unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
    ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
                            Amt->getZExtValue(), Width));
    return;
  }
  }

  SelectCode(N);
}

void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
  SDValue Cond = N->getOperand(1);

  if (isCBranchSCC(N)) {
    // This brcond will use S_CBRANCH_SCC*, so let tablegen handle it.
    SelectCode(N);
    return;
  }

  // The result of VOPC instructions is or'd against ~EXEC before it is
  // written to vcc or another SGPR. This means that the value '1' is always
  // written to the corresponding bit for results that are masked. In order
  // to correctly check against vccz, we need to and VCC with the EXEC
  // register in order to clear the value from the masked bits.

  SDLoc SL(N);

  SDNode *MaskedCond =
        CurDAG->getMachineNode(AMDGPU::S_AND_B64, SL, MVT::i1,
                               CurDAG->getRegister(AMDGPU::EXEC, MVT::i1),
                               Cond);
  SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, AMDGPU::VCC,
                                     SDValue(MaskedCond, 0),
                                     SDValue()); // Passing SDValue() adds a
                                                 // glue output.
  CurDAG->SelectNodeTo(N, AMDGPU::S_CBRANCH_VCCNZ, MVT::Other,
                       N->getOperand(2), // Basic Block
                       VCC.getValue(0),  // Chain
                       VCC.getValue(1)); // Glue
}

// This is here because there isn't a way to use the generated sub0_sub1 as the
// subreg index to EXTRACT_SUBREG in tablegen.
void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
  MemSDNode *Mem = cast<MemSDNode>(N);
  unsigned AS = Mem->getAddressSpace();
  if (AS == AMDGPUAS::FLAT_ADDRESS) {
    SelectCode(N);
    return;
  }

  MVT VT = N->getSimpleValueType(0);
  bool Is32 = (VT == MVT::i32);
  SDLoc SL(N);

  MachineSDNode *CmpSwap = nullptr;
  if (Subtarget->hasAddr64()) {
    SDValue SRsrc, VAddr, SOffset, Offset, GLC, SLC;

    if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_RTN_ADDR64 :
        AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_RTN_ADDR64;
      SDValue CmpVal = Mem->getOperand(2);

      // XXX - Do we care about glue operands?
      SDValue Ops[] = {
        CmpVal, VAddr, SRsrc, SOffset, Offset, SLC, Mem->getChain()
      };

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SDValue SRsrc, SOffset, Offset, SLC;
    if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
      unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_RTN_OFFSET :
        AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_RTN_OFFSET;

      SDValue CmpVal = Mem->getOperand(2);
      SDValue Ops[] = {
        CmpVal, SRsrc, SOffset, Offset, SLC, Mem->getChain()
      };

      CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
    }
  }

  if (!CmpSwap) {
    SelectCode(N);
    return;
  }

  MachineSDNode::mmo_iterator MMOs = MF->allocateMemRefsArray(1);
  *MMOs = Mem->getMemOperand();
  CmpSwap->setMemRefs(MMOs, MMOs + 1);

  unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  SDValue Extract
    = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));

  ReplaceUses(SDValue(N, 0), Extract);
  ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
  CurDAG->RemoveDeadNode(N);
}

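// The VOP3 source-modifier selectors below fold fneg/fabs wrappers on a
// source operand into the SISrcMods::NEG / SISrcMods::ABS bits of the
// instruction's src_modifiers operand, instead of emitting separate
// instructions for them.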
bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {
  unsigned Mods = 0;

  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src,
                                          SDValue &SrcMods) const {
  bool Res = SelectVOP3Mods(In, Src, SrcMods);
  return Res && cast<ConstantSDNode>(SrcMods)->isNullValue();
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  SDLoc DL(In);
  // FIXME: Handle Clamp and Omod.
  Clamp = CurDAG->getTargetConstant(0, DL, MVT::i32);
  Omod = CurDAG->getTargetConstant(0, DL, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3NoMods0(SDValue In, SDValue &Src,
                                           SDValue &SrcMods, SDValue &Clamp,
                                           SDValue &Omod) const {
  bool Res = SelectVOP3Mods0(In, Src, SrcMods, Clamp, Omod);

  return Res && cast<ConstantSDNode>(SrcMods)->isNullValue() &&
                cast<ConstantSDNode>(Clamp)->isNullValue() &&
                cast<ConstantSDNode>(Omod)->isNullValue();
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
                                              SDValue &SrcMods,
                                              SDValue &Omod) const {
  // FIXME: Handle Omod.
  Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src,
                                                   SDValue &SrcMods,
                                                   SDValue &Clamp,
                                                   SDValue &Omod) const {
  Clamp = Omod = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
  return SelectVOP3Mods(In, Src, SrcMods);
}

void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
  MachineFrameInfo *MFI = CurDAG->getMachineFunction().getFrameInfo();

  // Handle the perverse case where a frame index is being stored. We don't
  // want to see multiple frame index operands on the same instruction since
  // it complicates things and violates some assumptions about frame index
  // lowering.
  for (int I = MFI->getObjectIndexBegin(), E = MFI->getObjectIndexEnd();
       I != E; ++I) {
    SDValue FI = CurDAG->getTargetFrameIndex(I, MVT::i32);

    // It's possible that we have a frame index defined in the function that
    // isn't used in this block.
    if (FI.use_empty())
      continue;

    // Skip over the AssertZext inserted during lowering.
    SDValue EffectiveFI = FI;
    auto It = FI->use_begin();
    if (It->getOpcode() == ISD::AssertZext && FI->hasOneUse()) {
      EffectiveFI = SDValue(*It, 0);
      It = EffectiveFI->use_begin();
    }

    for (auto It = EffectiveFI->use_begin(); !It.atEnd(); ) {
      SDUse &Use = It.getUse();
      SDNode *User = Use.getUser();
      unsigned OpIdx = It.getOperandNo();
      ++It;

      if (MemSDNode *M = dyn_cast<MemSDNode>(User)) {
        unsigned PtrIdx = M->getOpcode() == ISD::STORE ? 2 : 1;
        if (OpIdx == PtrIdx)
          continue;

        unsigned OpN = M->getNumOperands();
        SDValue NewOps[8];

        assert(OpN < array_lengthof(NewOps));
        for (unsigned Op = 0; Op != OpN; ++Op) {
          if (Op != OpIdx) {
            NewOps[Op] = M->getOperand(Op);
            continue;
          }

          MachineSDNode *Mov = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      SDLoc(M), MVT::i32, FI);
          NewOps[Op] = SDValue(Mov, 0);
        }

        CurDAG->UpdateNodeOperands(M, makeArrayRef(NewOps, OpN));
      }
    }
  }
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;

    // Go over all selected nodes and try to fold them a bit more.
    for (SDNode &Node : CurDAG->allnodes()) {
      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(&Node);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != &Node) {
        ReplaceUses(&Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}