//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
#include "AMDGPUInstructionSelector.h"
#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include <optional>

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;
static cl::opt<bool> AllowRiskySelect(
  "amdgpu-global-isel-risky-select",
  cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
  cl::init(false),
  cl::ReallyHidden);

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget
AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                        CodeGenCoverage *CoverageInfo,
                                        ProfileSummaryInfo *PSI,
                                        BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}
bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  // The verifier is oblivious to s1 being a valid value for wavesize registers.
  if (Reg.isPhysical())
    return false;

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    if (!Ty.isValid() || Ty.getSizeInBits() != 1)
      return false;
    // G_TRUNC s1 result is never vcc.
    return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
           RC->hasSuperClassEq(TRI.getBoolRC());
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}
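
// Illustrative note (not from the original source): a vcc-bank s1 value keeps
// one bit per lane, so TRI.getBoolRC() is a 64-bit SGPR pair class on wave64
// and a single 32-bit SGPR class on wave32.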

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
  MI.setDesc(TII.get(NewOpc));
  MI.removeOperand(1); // Remove intrinsic ID.
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, *MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
    }

    if (!isVCC(SrcReg, *MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
        return false;

      const TargetRegisterClass *SrcRC
        = TRI.getConstrainedRegClassForOperand(Src, *MRI);

      std::optional<ValueAndVReg> ConstVal =
          getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
      if (ConstVal) {
        unsigned MovOpc =
            STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
        BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
            .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
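        // Illustrative note: a "true" constant materializes the all-ones wave
        // mask (-1), i.e. the bit is set for every lane, while "false" clears
        // all lanes; this is the wave-sized representation of a boolean.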
      } else {
        Register MaskedReg = MRI->createVirtualRegister(SrcRC);

        // We can't trust the high bits at this point, so clear them.

        // TODO: Skip masking high bits if def is known boolean.

        unsigned AndOpc =
            TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
        BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
            .addImm(1)
            .addReg(SrcReg);
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
            .addImm(0)
            .addReg(MaskedReg);
      }

      if (!MRI->getRegClassOrNull(SrcReg))
        MRI->setRegClass(SrcReg, SrcRC);
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
      return false;

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (MO.getReg().isPhysical())
      continue;

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, *MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
  }

  return true;
}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);
  if (DefTy == LLT::scalar(1)) {
    if (!AllowRiskySelect) {
      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
  }

  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI->getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  // TODO: Verify that all registers have the same bank
  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}

MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  Register DstReg = MRI->createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    Register Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}
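
// Worked example (illustrative): splitting the 64-bit immediate
// 0x1111222233334444 yields 0x33334444 for sub0 (the low 32 bits) and
// 0x11112222 for sub1 (the high 32 bits).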

static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
      DstRB->getID() != AMDGPU::VCCRegBankID)
    return false;

  // A vcc-bank boolean is wave-sized, so it needs the 64-bit opcode on wave64.
  bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
                            STI.isWave64());
  I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));

  // Dead implicit-def of scc
  I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                         true, // isImp
                                         false, // isKill
                                         true)); // isDead
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  LLT Ty = MRI->getType(DstReg);
  if (Ty.isVector())
    return false;

  unsigned Size = Ty.getSizeInBits();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;

    Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI->createVirtualRegister(&HalfRC);
  Register DstHi = MRI->createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI->createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}
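
// Sketch of the 64-bit SALU expansion above (illustrative pseudo-MIR; the
// register names are invented):
//   %lo:sreg_32 = S_ADD_U32 %a.sub0, %b.sub0    ; carry-out written to scc
//   %hi:sreg_32 = S_ADDC_U32 %a.sub1, %b.sub1   ; consumes scc as carry-in
//   %dst:sreg_64_xexec = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1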

bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
    MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register Dst0Reg = I.getOperand(0).getReg();
  Register Dst1Reg = I.getOperand(1).getReg();
  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
                     I.getOpcode() == AMDGPU::G_UADDE;
  const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
                          I.getOpcode() == AMDGPU::G_USUBE;

  if (isVCC(Dst1Reg, *MRI)) {
    unsigned NoCarryOpc =
        IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
    unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
    I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    I.addOperand(*MF, MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  Register Src0Reg = I.getOperand(2).getReg();
  Register Src1Reg = I.getOperand(3).getReg();

  if (HasCarryIn) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(I.getOperand(4).getReg());
  }

  unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
    .addReg(AMDGPU::SCC);

  if (!MRI->getRegClassOrNull(Dst1Reg))
    MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (HasCarryIn &&
      !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
                                    AMDGPU::SReg_32RegClass, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
    MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;

  unsigned Opc;
  if (Subtarget->hasMADIntraFwdBug())
    Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
                     : AMDGPU::V_MAD_I64_I32_gfx11_e64;
  else
    Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
  I.setDesc(TII.get(Opc));
  I.addOperand(*MF, MachineOperand::CreateImm(0));
  I.addImplicitDefUseOperands(*MF);
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

// TODO: We should probably legalize these to only using 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();

  // TODO: Should handle any multiple of 32 offset.
  unsigned Offset = I.getOperand(2).getImm();
  if (Offset % 32 != 0 || DstSize > 128)
    return false;

  // 16-bit operations really use 32-bit registers.
  // FIXME: Probably should not allow 16-bit G_EXTRACT results.
  if (DstSize == 16)
    DstSize = 32;

  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
  if (!SrcRC)
    return false;
  unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
                                                         DstSize / 32);
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
  if (!SrcRC)
    return false;

  SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
                                    *SrcRC, I.getOperand(1));
  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg, 0, SubReg);

  I.eraseFromParent();
  return true;
}
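
// Worked example (illustrative): extracting s32 at bit offset 64 from a
// 128-bit source selects channel 64 / 32 = 2, so the G_EXTRACT becomes a
// plain COPY from the source's sub2 subregister.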

bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return selectImpl(MI, *CoverageInfo);

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
      TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
  if (!DstRC)
    return false;

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, *MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg0);
  LLT SrcTy = MRI->getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);

  const TargetRegisterClass *SrcRC =
      TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, 0, SubRegs[I]);

    // Make sure the subregister index is valid for the source register.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
    if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
      return false;

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const {
  assert(MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC ||
         MI.getOpcode() == AMDGPU::G_BUILD_VECTOR);

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  LLT SrcTy = MRI->getType(Src0);
  const unsigned SrcSize = SrcTy.getSizeInBits();

  // BUILD_VECTOR with >=32 bits source is handled by MERGE_VALUE.
  if (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR && SrcSize >= 32) {
    return selectG_MERGE_VALUES(MI);
  }

  // Selection logic below is for V2S16 only.
  // For G_BUILD_VECTOR_TRUNC, additionally check that the operands are s32.
  Register Dst = MI.getOperand(0).getReg();
  if (MRI->getType(Dst) != LLT::fixed_vector(2, 16) ||
      (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC &&
       SrcTy != LLT::scalar(32)))
    return selectImpl(MI, *CoverageInfo);

  const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstBank->getID() == AMDGPU::AGPRRegBankID)
    return false;

  assert(DstBank->getID() == AMDGPU::SGPRRegBankID ||
         DstBank->getID() == AMDGPU::VGPRRegBankID);
  const bool IsVector = DstBank->getID() == AMDGPU::VGPRRegBankID;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  // First, before trying TableGen patterns, check if both sources are
  // constants. In those cases, we can trivially compute the final constant
  // and emit a simple move.
  auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
  if (ConstSrc1) {
    auto ConstSrc0 =
        getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
    if (ConstSrc0) {
      const int64_t K0 = ConstSrc0->Value.getSExtValue();
      const int64_t K1 = ConstSrc1->Value.getSExtValue();
      uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
      uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
      uint32_t Imm = Lo16 | (Hi16 << 16);
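      // Worked example (illustrative): K0 = 1, K1 = -2 gives Lo16 = 0x0001,
      // Hi16 = 0xfffe, so Imm = 0xfffe0001.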

      // VALU
      if (IsVector) {
        BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), Dst).addImm(Imm);
        MI.eraseFromParent();
        return RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI);
      }

      // SALU
      BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst).addImm(Imm);
      MI.eraseFromParent();
      return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
    }
  }

  // Now try TableGen patterns.
  if (selectImpl(MI, *CoverageInfo))
    return true;

  // TODO: This should probably be a combine somewhere
  // (build_vector $src0, undef)  -> copy $src0
  MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
  if (Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
    MI.setDesc(TII.get(AMDGPU::COPY));
    MI.removeOperand(2);
    const auto &RC =
        IsVector ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
    return RBI.constrainGenericRegister(Dst, RC, *MRI) &&
           RBI.constrainGenericRegister(Src0, RC, *MRI);
  }

  // TODO: Can be improved?
  if (IsVector) {
    Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
                   .addImm(0xFFFF)
                   .addReg(Src0);
    if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
      return false;

    MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), Dst)
              .addReg(Src1)
              .addImm(16)
              .addReg(TmpReg);
    if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  Register ShiftSrc0;
  Register ShiftSrc1;

  // With multiple uses of the shift, this will duplicate the shift and
  // increase register pressure.
  //
  // (build_vector (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
  //  => (S_PACK_HH_B32_B16 $src0, $src1)
  // (build_vector (lshr_oneuse SReg_32:$src0, 16), $src1)
  //  => (S_PACK_HL_B32_B16 $src0, $src1)
  // (build_vector $src0, (lshr_oneuse SReg_32:$src1, 16))
  //  => (S_PACK_LH_B32_B16 $src0, $src1)
  // (build_vector $src0, $src1)
  //  => (S_PACK_LL_B32_B16 $src0, $src1)

  bool Shift0 = mi_match(
      Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));

  bool Shift1 = mi_match(
      Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));

  unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
  if (Shift0 && Shift1) {
    Opc = AMDGPU::S_PACK_HH_B32_B16;
    MI.getOperand(1).setReg(ShiftSrc0);
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift1) {
    Opc = AMDGPU::S_PACK_LH_B32_B16;
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift0) {
    auto ConstSrc1 =
        getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
    if (ConstSrc1 && ConstSrc1->Value == 0) {
      // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
      auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
                     .addReg(ShiftSrc0)
                     .addImm(16);

      MI.eraseFromParent();
      return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
    }
    if (STI.hasSPackHL()) {
      Opc = AMDGPU::S_PACK_HL_B32_B16;
      MI.getOperand(1).setReg(ShiftSrc0);
    }
  }

  MI.setDesc(TII.get(Opc));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register Src0Reg = I.getOperand(1).getReg();
  Register Src1Reg = I.getOperand(2).getReg();
  LLT Src1Ty = MRI->getType(Src1Reg);

  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
  unsigned InsSize = Src1Ty.getSizeInBits();

  int64_t Offset = I.getOperand(3).getImm();

  // FIXME: These cases should have been illegal and unnecessary to check here.
  if (Offset % 32 != 0 || InsSize % 32 != 0)
    return false;

  // Currently not handled by getSubRegFromChannel.
  if (InsSize > 128)
    return false;

  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
  if (SubReg == AMDGPU::NoSubRegister)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
      TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
  if (!DstRC)
    return false;

  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
  const TargetRegisterClass *Src0RC =
      TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
  const TargetRegisterClass *Src1RC =
      TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);

  // Deal with weird cases where the class only partially supports the subreg
  // index.
  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
  if (!Src0RC || !Src1RC)
    return false;

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
    .addReg(Src0Reg)
    .addReg(Src1Reg)
    .addImm(SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register OffsetReg = MI.getOperand(2).getReg();
  Register WidthReg = MI.getOperand(3).getReg();

  assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
         "scalar BFX instructions are expanded in regbankselect");
  assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
         "64-bit vector BFX instructions are expanded in regbankselect");

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
  unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
                 .addReg(SrcReg)
                 .addReg(OffsetReg)
                 .addReg(WidthReg);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
  if (STI.getLDSBankCount() != 16)
    return selectImpl(MI, *CoverageInfo);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register M0Val = MI.getOperand(6).getReg();
  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
    return false;

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.

  Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
    .addImm(2)
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm()); // $attrchan

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
    .addImm(0)                          // $src0_modifiers
    .addReg(Src0)                       // $src0
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm())  // $attrchan
    .addImm(0)                          // $src2_modifiers
    .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
    .addImm(MI.getOperand(5).getImm())  // $high
    .addImm(0)                          // $clamp
    .addImm(0);                         // $omod

  MI.eraseFromParent();
  return true;
}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it is
// still required to abide by the 1 SGPR rule. Fix this up if we might have
// multiple SGPRs.
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
  // With a constant bus limit of at least 2, there's no issue.
  if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
    return selectImpl(MI, *CoverageInfo);

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register VDst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(2).getReg();
  Register LaneSelect = MI.getOperand(3).getReg();
  Register VDstIn = MI.getOperand(4).getReg();

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);

  std::optional<ValueAndVReg> ConstSelect =
      getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
  if (ConstSelect) {
    // The selector has to be an inline immediate, so we can use whatever for
    // the other operands.
    MIB.addReg(Val);
    MIB.addImm(ConstSelect->Value.getSExtValue() &
               maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
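    // Illustrative note: the mask keeps only log2(wavesize) bits of the
    // selector, i.e. lane indices 0-63 on wave64 and 0-31 on wave32.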
  } else {
    std::optional<ValueAndVReg> ConstVal =
        getIConstantVRegValWithLookThrough(Val, *MRI);

    // If the value written is an inline immediate, we can get away without a
    // copy to m0.
    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
                                                 STI.hasInv2PiInlineImm())) {
      MIB.addImm(ConstVal->Value.getSExtValue());
      MIB.addReg(LaneSelect);
    } else {
      MIB.addReg(Val);

      // If the lane selector was originally in a VGPR and copied with
      // readfirstlane, there's a hazard to read the same SGPR from the
      // VALU. Constrain to a different SGPR to help avoid needing a nop later.
      RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);

      BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
        .addReg(LaneSelect);
      MIB.addReg(AMDGPU::M0);
    }
  }

  MIB.addReg(VDstIn);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();

  LLT Ty = MRI->getType(Dst0);
  unsigned Opc;
  if (Ty == LLT::scalar(32))
    Opc = AMDGPU::V_DIV_SCALE_F32_e64;
  else if (Ty == LLT::scalar(64))
    Opc = AMDGPU::V_DIV_SCALE_F64_e64;
  else
    return false;

  // TODO: Match source modifiers.

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  Register Numer = MI.getOperand(3).getReg();
  Register Denom = MI.getOperand(4).getReg();
  unsigned ChooseDenom = MI.getOperand(5).getImm();

  Register Src0 = ChooseDenom != 0 ? Numer : Denom;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
    .addDef(Dst1)
    .addImm(0)     // $src0_modifiers
    .addUse(Src0)  // $src0
    .addImm(0)     // $src1_modifiers
    .addUse(Denom) // $src1
    .addImm(0)     // $src2_modifiers
    .addUse(Numer) // $src2
    .addImm(0)     // $clamp
    .addImm(0);    // $omod

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg })
      MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

    return true;
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_strict_wwm:
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
  case Intrinsic::amdgcn_strict_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
  case Intrinsic::amdgcn_writelane:
    return selectWritelane(I);
  case Intrinsic::amdgcn_div_scale:
    return selectDivScale(I);
  case Intrinsic::amdgcn_icmp:
  case Intrinsic::amdgcn_fcmp:
    if (selectImpl(I, *CoverageInfo))
      return true;
    return selectIntrinsicCmp(I);
  case Intrinsic::amdgcn_ballot:
    return selectBallot(I);
  case Intrinsic::amdgcn_inverse_ballot:
    return selectInverseBallot(I);
  case Intrinsic::amdgcn_reloc_constant:
    return selectRelocConstant(I);
  case Intrinsic::amdgcn_groupstaticsize:
    return selectGroupStaticSize(I);
  case Intrinsic::returnaddress:
    return selectReturnAddress(I);
  case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
  case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
  case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
  case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
  case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
  case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
  case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
  case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
  case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
  case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
  case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
  case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
  case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
  case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
    return selectSMFMACIntrin(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size,
                          const GCNSubtarget &ST) {
  if (Size != 16 && Size != 32 && Size != 64)
    return -1;

  if (Size == 16 && !ST.has16BitInsts())
    return -1;

  const auto Select = [&](unsigned S16Opc, unsigned TrueS16Opc, unsigned S32Opc,
                          unsigned S64Opc) {
    if (Size == 16)
      return ST.hasTrue16BitInsts() ? TrueS16Opc : S16Opc;
    if (Size == 32)
      return S32Opc;
    return S64Opc;
  };

  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Select(AMDGPU::V_CMP_NE_U16_e64, AMDGPU::V_CMP_NE_U16_t16_e64,
                  AMDGPU::V_CMP_NE_U32_e64, AMDGPU::V_CMP_NE_U64_e64);
  case CmpInst::ICMP_EQ:
    return Select(AMDGPU::V_CMP_EQ_U16_e64, AMDGPU::V_CMP_EQ_U16_t16_e64,
                  AMDGPU::V_CMP_EQ_U32_e64, AMDGPU::V_CMP_EQ_U64_e64);
  case CmpInst::ICMP_SGT:
    return Select(AMDGPU::V_CMP_GT_I16_e64, AMDGPU::V_CMP_GT_I16_t16_e64,
                  AMDGPU::V_CMP_GT_I32_e64, AMDGPU::V_CMP_GT_I64_e64);
  case CmpInst::ICMP_SGE:
    return Select(AMDGPU::V_CMP_GE_I16_e64, AMDGPU::V_CMP_GE_I16_t16_e64,
                  AMDGPU::V_CMP_GE_I32_e64, AMDGPU::V_CMP_GE_I64_e64);
  case CmpInst::ICMP_SLT:
    return Select(AMDGPU::V_CMP_LT_I16_e64, AMDGPU::V_CMP_LT_I16_t16_e64,
                  AMDGPU::V_CMP_LT_I32_e64, AMDGPU::V_CMP_LT_I64_e64);
  case CmpInst::ICMP_SLE:
    return Select(AMDGPU::V_CMP_LE_I16_e64, AMDGPU::V_CMP_LE_I16_t16_e64,
                  AMDGPU::V_CMP_LE_I32_e64, AMDGPU::V_CMP_LE_I64_e64);
  case CmpInst::ICMP_UGT:
    return Select(AMDGPU::V_CMP_GT_U16_e64, AMDGPU::V_CMP_GT_U16_t16_e64,
                  AMDGPU::V_CMP_GT_U32_e64, AMDGPU::V_CMP_GT_U64_e64);
  case CmpInst::ICMP_UGE:
    return Select(AMDGPU::V_CMP_GE_U16_e64, AMDGPU::V_CMP_GE_U16_t16_e64,
                  AMDGPU::V_CMP_GE_U32_e64, AMDGPU::V_CMP_GE_U64_e64);
  case CmpInst::ICMP_ULT:
    return Select(AMDGPU::V_CMP_LT_U16_e64, AMDGPU::V_CMP_LT_U16_t16_e64,
                  AMDGPU::V_CMP_LT_U32_e64, AMDGPU::V_CMP_LT_U64_e64);
  case CmpInst::ICMP_ULE:
    return Select(AMDGPU::V_CMP_LE_U16_e64, AMDGPU::V_CMP_LE_U16_t16_e64,
                  AMDGPU::V_CMP_LE_U32_e64, AMDGPU::V_CMP_LE_U64_e64);

  case CmpInst::FCMP_OEQ:
    return Select(AMDGPU::V_CMP_EQ_F16_e64, AMDGPU::V_CMP_EQ_F16_t16_e64,
                  AMDGPU::V_CMP_EQ_F32_e64, AMDGPU::V_CMP_EQ_F64_e64);
  case CmpInst::FCMP_OGT:
    return Select(AMDGPU::V_CMP_GT_F16_e64, AMDGPU::V_CMP_GT_F16_t16_e64,
                  AMDGPU::V_CMP_GT_F32_e64, AMDGPU::V_CMP_GT_F64_e64);
  case CmpInst::FCMP_OGE:
    return Select(AMDGPU::V_CMP_GE_F16_e64, AMDGPU::V_CMP_GE_F16_t16_e64,
                  AMDGPU::V_CMP_GE_F32_e64, AMDGPU::V_CMP_GE_F64_e64);
  case CmpInst::FCMP_OLT:
    return Select(AMDGPU::V_CMP_LT_F16_e64, AMDGPU::V_CMP_LT_F16_t16_e64,
                  AMDGPU::V_CMP_LT_F32_e64, AMDGPU::V_CMP_LT_F64_e64);
  case CmpInst::FCMP_OLE:
    return Select(AMDGPU::V_CMP_LE_F16_e64, AMDGPU::V_CMP_LE_F16_t16_e64,
                  AMDGPU::V_CMP_LE_F32_e64, AMDGPU::V_CMP_LE_F64_e64);
  case CmpInst::FCMP_ONE:
    return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64,
                  AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64);
  case CmpInst::FCMP_ORD:
    return Select(AMDGPU::V_CMP_O_F16_e64, AMDGPU::V_CMP_O_F16_t16_e64,
                  AMDGPU::V_CMP_O_F32_e64, AMDGPU::V_CMP_O_F64_e64);
  case CmpInst::FCMP_UNO:
    return Select(AMDGPU::V_CMP_U_F16_e64, AMDGPU::V_CMP_U_F16_t16_e64,
                  AMDGPU::V_CMP_U_F32_e64, AMDGPU::V_CMP_U_F64_e64);
  case CmpInst::FCMP_UEQ:
    return Select(AMDGPU::V_CMP_NLG_F16_e64, AMDGPU::V_CMP_NLG_F16_t16_e64,
                  AMDGPU::V_CMP_NLG_F32_e64, AMDGPU::V_CMP_NLG_F64_e64);
  case CmpInst::FCMP_UGT:
    return Select(AMDGPU::V_CMP_NLE_F16_e64, AMDGPU::V_CMP_NLE_F16_t16_e64,
                  AMDGPU::V_CMP_NLE_F32_e64, AMDGPU::V_CMP_NLE_F64_e64);
  case CmpInst::FCMP_UGE:
    return Select(AMDGPU::V_CMP_NLT_F16_e64, AMDGPU::V_CMP_NLT_F16_t16_e64,
                  AMDGPU::V_CMP_NLT_F32_e64, AMDGPU::V_CMP_NLT_F64_e64);
  case CmpInst::FCMP_ULT:
    return Select(AMDGPU::V_CMP_NGE_F16_e64, AMDGPU::V_CMP_NGE_F16_t16_e64,
                  AMDGPU::V_CMP_NGE_F32_e64, AMDGPU::V_CMP_NGE_F64_e64);
  case CmpInst::FCMP_ULE:
    return Select(AMDGPU::V_CMP_NGT_F16_e64, AMDGPU::V_CMP_NGT_F16_t16_e64,
                  AMDGPU::V_CMP_NGT_F32_e64, AMDGPU::V_CMP_NGT_F64_e64);
  case CmpInst::FCMP_UNE:
    return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64,
                  AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64);
  case CmpInst::FCMP_TRUE:
    return Select(AMDGPU::V_CMP_TRU_F16_e64, AMDGPU::V_CMP_TRU_F16_t16_e64,
                  AMDGPU::V_CMP_TRU_F32_e64, AMDGPU::V_CMP_TRU_F64_e64);
  case CmpInst::FCMP_FALSE:
    return Select(AMDGPU::V_CMP_F_F16_e64, AMDGPU::V_CMP_F_F16_t16_e64,
                  AMDGPU::V_CMP_F_F32_e64, AMDGPU::V_CMP_F_F64_e64);
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  Register CCReg = I.getOperand(0).getReg();
  if (!isVCC(CCReg, *MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
            .add(I.getOperand(2))
            .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
            I.getOperand(0).getReg())
            .add(I.getOperand(2))
            .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectIntrinsicCmp(MachineInstr &I) const {
  Register Dst = I.getOperand(0).getReg();
  if (isVCC(Dst, *MRI))
    return false;

  LLT DstTy = MRI->getType(Dst);
  if (DstTy.getSizeInBits() != STI.getWavefrontSize())
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  // i1 inputs are not supported in GlobalISel.
  if (Size == 1)
    return false;

  auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
  if (!CmpInst::isIntPredicate(Pred) && !CmpInst::isFPPredicate(Pred)) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
    I.eraseFromParent();
    return RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI);
  }

  const int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget);
  if (Opcode == -1)
    return false;

  MachineInstrBuilder SelectedMI;
  MachineOperand &LHS = I.getOperand(2);
  MachineOperand &RHS = I.getOperand(3);
  auto [Src0, Src0Mods] = selectVOP3ModsImpl(LHS);
  auto [Src1, Src1Mods] = selectVOP3ModsImpl(RHS);
  Register Src0Reg =
      copyToVGPRIfSrcFolded(Src0, Src0Mods, LHS, &I, /*ForceVGPR*/ true);
  Register Src1Reg =
      copyToVGPRIfSrcFolded(Src1, Src1Mods, RHS, &I, /*ForceVGPR*/ true);
  SelectedMI = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst);
  if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0_modifiers))
    SelectedMI.addImm(Src0Mods);
  SelectedMI.addReg(Src0Reg);
  if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1_modifiers))
    SelectedMI.addImm(Src1Mods);
  SelectedMI.addReg(Src1Reg);
  if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::clamp))
    SelectedMI.addImm(0); // clamp
  if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::op_sel))
    SelectedMI.addImm(0); // op_sel

  RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI);
  if (!constrainSelectedInstRegOperands(*SelectedMI, TII, TRI, RBI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register DstReg = I.getOperand(0).getReg();
  const unsigned Size = MRI->getType(DstReg).getSizeInBits();
  const bool Is64 = Size == 64;
  const bool IsWave32 = (STI.getWavefrontSize() == 32);

  // In the common case, the return type matches the wave size.
  // However we also support emitting i64 ballots in wave32 mode.
  if (Size != STI.getWavefrontSize() && (!Is64 || !IsWave32))
    return false;

  std::optional<ValueAndVReg> Arg =
      getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);

  const auto BuildCopy = [&](Register SrcReg) {
    if (Size == STI.getWavefrontSize()) {
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
          .addReg(SrcReg);
      return;
    }

    // If emitting a i64 ballot in wave32, fill the upper bits with zeroes.
    Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg).addImm(0);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
        .addReg(SrcReg)
        .addImm(AMDGPU::sub0)
        .addReg(HiReg)
        .addImm(AMDGPU::sub1);
  };

  if (Arg) {
    const int64_t Value = Arg->Value.getSExtValue();
    if (Value == 0) {
      unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
      BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
    } else if (Value == -1) // all ones
      BuildCopy(IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC);
    else
      return false;
  } else
    BuildCopy(I.getOperand(2).getReg());

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectInverseBallot(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  const Register DstReg = I.getOperand(0).getReg();
  const Register MaskReg = I.getOperand(2).getReg();

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(MaskReg);
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;

  Module *M = MF->getFunction().getParent();
  const MDNode *Metadata = I.getOperand(2).getMetadata();
  auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
  auto RelocSymbol = cast<GlobalVariable>(
    M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));

  MachineBasicBlock *BB = I.getParent();
  BuildMI(*BB, &I, I.getDebugLoc(),
          TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
    .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
  Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();

  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
    AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;

  MachineBasicBlock *MBB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);

  if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    MIB.addImm(MFI->getLDSSize());
  } else {
    Module *M = MF->getFunction().getParent();
    const GlobalValue *GV
      = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
    MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
  MachineBasicBlock *MBB = I.getParent();
  MachineFunction &MF = *MBB->getParent();
  const DebugLoc &DL = I.getDebugLoc();

  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  unsigned Depth = I.getOperand(2).getImm();

  const TargetRegisterClass *RC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
      !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
    return false;

  // Check for kernel and shader functions
  if (Depth != 0 ||
      MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(0);
    I.eraseFromParent();
    return true;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  // Get the return address reg and mark it as an implicit live-in
  Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
  Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
                                             AMDGPU::SReg_64RegClass, DL);
  BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(LiveIn);
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
  // SelectionDAG uses for wave32 vs wave64.
  MachineBasicBlock *BB = MI.getParent();
  BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
      .add(MI.getOperand(1));

  Register Reg = MI.getOperand(1).getReg();
  MI.eraseFromParent();

  if (!MRI->getRegClassOrNull(Reg))
    MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

  return true;
}

bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned IndexOperand = MI.getOperand(7).getImm();
  bool WaveRelease = MI.getOperand(8).getImm() != 0;
  bool WaveDone = MI.getOperand(9).getImm() != 0;

  if (WaveDone && !WaveRelease)
    report_fatal_error("ds_ordered_count: wave_done requires wave_release");

  unsigned OrderedCountIndex = IndexOperand & 0x3f;
  IndexOperand &= ~0x3f;
  unsigned CountDw = 0;

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
    CountDw = (IndexOperand >> 24) & 0xf;
    IndexOperand &= ~(0xf << 24);

    if (CountDw < 1 || CountDw > 4) {
      report_fatal_error(
        "ds_ordered_count: dword count must be between 1 and 4");
    }
  }

  if (IndexOperand)
    report_fatal_error("ds_ordered_count: bad index operand");

  unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
  unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);

  unsigned Offset0 = OrderedCountIndex << 2;
  unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
    Offset1 |= (CountDw - 1) << 6;

  if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
    Offset1 |= ShaderType << 2;

  unsigned Offset = Offset0 | (Offset1 << 8);
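
  // Worked example (illustrative): ds_ordered_add with index 1, wave_release
  // set, wave_done clear, on a pre-GFX10 compute shader (ShaderType 0):
  // Offset0 = 1 << 2 = 4 and Offset1 = 1, so Offset = 4 | (1 << 8) = 0x104.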

  Register M0Val = MI.getOperand(2).getReg();
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);

  Register DstReg = MI.getOperand(0).getReg();
  Register ValReg = MI.getOperand(3).getReg();
  MachineInstrBuilder DS =
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
      .addReg(ValReg)
      .addImm(Offset)
      .cloneMemRefs(MI);

  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
  MI.eraseFromParent();
  return Ret;
}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
  default:
    llvm_unreachable("not a gws intrinsic");
  }
}

bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
                                                     Intrinsic::ID IID) const {
  if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !STI.hasGWSSemaReleaseAll())
    return false;

  // intrinsic ID, vsrc, offset
  const bool HasVSrc = MI.getNumOperands() == 3;
  assert(HasVSrc || MI.getNumOperands() == 2);

  Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
  const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
  if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  unsigned ImmOffset;

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *Readfirstlane = nullptr;

  // If we legalized the VGPR input, strip out the readfirstlane to analyze the
  // incoming offset, in case there's an add of a constant. We'll have to put it
  // back later.
  if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
    Readfirstlane = OffsetDef;
    BaseOffset = OffsetDef->getOperand(1).getReg();
    OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  }

  if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.
    ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(0);
  } else {
    std::tie(BaseOffset, ImmOffset) =
        AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, KB);

    if (Readfirstlane) {
      // We have the constant offset now, so put the readfirstlane back on the
      // variable component.
      if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
        return false;

      Readfirstlane->getOperand(1).setReg(BaseOffset);
      BaseOffset = Readfirstlane->getOperand(0).getReg();
    } else {
      if (!RBI.constrainGenericRegister(BaseOffset,
                                        AMDGPU::SReg_32RegClass, *MRI))
        return false;
    }

    Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
      .addReg(BaseOffset)
      .addImm(16);

    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0Base);
  }

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));

  if (HasVSrc) {
    Register VSrc = MI.getOperand(1).getReg();
    MIB.addReg(VSrc);

    if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
      return false;
  }

  MIB.addImm(ImmOffset)
     .cloneMemRefs(MI);

  TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
                                                      bool IsAppend) const {
  Register PtrBase = MI.getOperand(2).getReg();
  LLT PtrTy = MRI->getType(PtrBase);
  bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  unsigned Offset;
  std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));

  // TODO: Should this try to look through readfirstlane like GWS?
  if (!isDSOffsetLegal(PtrBase, Offset)) {
    PtrBase = MI.getOperand(2).getReg();
    Offset = 0;
  }

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(PtrBase);
  if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
    .addImm(Offset)
    .addImm(IsGDS ? -1 : 0)
    .cloneMemRefs(MI);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
  if (TM.getOptLevel() > CodeGenOpt::None) {
    unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
    if (WGSize <= STI.getWavefrontSize()) {
      // A workgroup that fits in a single wave is already synchronized, so a
      // full barrier is unnecessary.
      MachineBasicBlock *MBB = MI.getParent();
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
      MI.eraseFromParent();
      return true;
    }
  }
  return selectImpl(MI, *CoverageInfo);
}

static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
                         bool &IsTexFail) {
  if (TexFailCtrl)
    IsTexFail = true;

  TFE = (TexFailCtrl & 0x1) ? true : false;
  TexFailCtrl &= ~(uint64_t)0x1;
  LWE = (TexFailCtrl & 0x2) ? true : false;
  TexFailCtrl &= ~(uint64_t)0x2;

  return TexFailCtrl == 0;
}
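
// Illustrative note: bit 0 of the control value is TFE and bit 1 is LWE, so
// e.g. TexFailCtrl = 3 sets both; any remaining set bit makes this return
// false, which rejects the selection below.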
1719 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1720 MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1721 MachineBasicBlock *MBB = MI.getParent();
1722 const DebugLoc &DL = MI.getDebugLoc();
1724 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1725 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1727 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1728 unsigned IntrOpcode = Intr->BaseOpcode;
1729 const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1730 const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);
1732 const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1734 Register VDataIn, VDataOut;
1736 int NumVDataDwords = -1;
1737 bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1738 MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1741 if (!BaseOpcode->Sampler)
1744 Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1748 bool IsTexFail = false;
1749 if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1750 TFE, LWE, IsTexFail))
1753 const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1754 const bool IsA16 = (Flags & 1) != 0;
1755 const bool IsG16 = (Flags & 2) != 0;
1757 // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1758 if (IsA16 && !STI.hasG16() && !IsG16)
1762 unsigned DMaskLanes = 0;
1764 if (BaseOpcode->Atomic) {
1765 VDataOut = MI.getOperand(0).getReg();
1766 VDataIn = MI.getOperand(2).getReg();
1767 LLT Ty = MRI->getType(VDataIn);
1769 // Be careful to allow atomic swap on 16-bit element vectors.
1770 const bool Is64Bit = BaseOpcode->AtomicX2 ?
1771 Ty.getSizeInBits() == 128 :
1772 Ty.getSizeInBits() == 64;
1774 if (BaseOpcode->AtomicX2) {
1775 assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1777 DMask = Is64Bit ? 0xf : 0x3;
1778 NumVDataDwords = Is64Bit ? 4 : 2;
1780 DMask = Is64Bit ? 0x3 : 0x1;
1781 NumVDataDwords = Is64Bit ? 2 : 1;
1784 DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1785 DMaskLanes = BaseOpcode->Gather4 ? 4 : llvm::popcount(DMask);
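// E.g. a dmask of 0b1011 enables three result lanes for a normal load, while
// gather4 ops always produce four results regardless of the mask.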
1787 if (BaseOpcode->Store) {
1788 VDataIn = MI.getOperand(1).getReg();
1789 VDataTy = MRI->getType(VDataIn);
1790 NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1792 VDataOut = MI.getOperand(0).getReg();
1793 VDataTy = MRI->getType(VDataOut);
1794 NumVDataDwords = DMaskLanes;
1796 if (IsD16 && !STI.hasUnpackedD16VMem())
1797 NumVDataDwords = (DMaskLanes + 1) / 2;
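// With packed D16, two 16-bit results share one dword, rounding up: e.g.
// three enabled dmask lanes pack into (3 + 1) / 2 == 2 dwords.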
1802 if (Subtarget->hasG16() && IsG16) {
1803 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1804 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1805 assert(G16MappingInfo);
1806 IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1809 // TODO: Check this in verifier.
1810 assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1812 unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1813 if (BaseOpcode->Atomic)
1814 CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1815 if (CPol & ~AMDGPU::CPol::ALL)
1818 int NumVAddrRegs = 0;
1819 int NumVAddrDwords = 0;
1820 for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1821 // Skip the $noregs and 0s inserted during legalization.
1822 MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1823 if (!AddrOp.isReg())
1824 continue; // XXX - Break?
1826 Register Addr = AddrOp.getReg();
1831 NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1834 // The legalizer preprocessed the intrinsic arguments. If we aren't using
1835 // NSA, these should have been packed into a single value in the first
1836 // address register.
1837 const bool UseNSA =
1838 NumVAddrRegs != 1 &&
1839 (STI.hasPartialNSAEncoding() ? NumVAddrDwords >= NumVAddrRegs
1840 : NumVAddrDwords == NumVAddrRegs);
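// Rough example: three separate 32-bit address registers give
// NumVAddrRegs == NumVAddrDwords == 3, satisfying the non-partial check;
// a single packed vector operand has NumVAddrRegs == 1 and never uses NSA.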
1841 if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1842 LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1851 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1852 UseNSA ? AMDGPU::MIMGEncGfx11NSA
1853 : AMDGPU::MIMGEncGfx11Default,
1854 NumVDataDwords, NumVAddrDwords);
1855 } else if (IsGFX10Plus) {
1856 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1857 UseNSA ? AMDGPU::MIMGEncGfx10NSA
1858 : AMDGPU::MIMGEncGfx10Default,
1859 NumVDataDwords, NumVAddrDwords);
1861 if (Subtarget->hasGFX90AInsts()) {
1862 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1863 NumVDataDwords, NumVAddrDwords);
1864 if (Opcode == -1) {
1865 LLVM_DEBUG(
1866 dbgs()
1867 << "requested image instruction is not supported on this GPU\n");
1868 return false;
1869 }
1870 }
1871 if (Opcode == -1 &&
1872 STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1873 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1874 NumVDataDwords, NumVAddrDwords);
1876 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1877 NumVDataDwords, NumVAddrDwords);
1882 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1886 if (BaseOpcode->AtomicX2) {
1887 const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1889 Register TmpReg = MRI->createVirtualRegister(
1890 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1891 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1894 if (!MRI->use_empty(VDataOut)) {
1895 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1896 .addReg(TmpReg, RegState::Kill, SubReg);
1900 MIB.addDef(VDataOut); // vdata output
1905 MIB.addReg(VDataIn); // vdata input
1907 for (int I = 0; I != NumVAddrRegs; ++I) {
1908 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1909 if (SrcOp.isReg()) {
1910 assert(SrcOp.getReg() != 0);
1911 MIB.addReg(SrcOp.getReg());
1915 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1916 if (BaseOpcode->Sampler)
1917 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1919 MIB.addImm(DMask); // dmask
1922 MIB.addImm(DimInfo->Encoding);
1926 MIB.addImm(IsA16 && // a16 or r128
1927 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1929 MIB.addImm(IsA16 ? -1 : 0);
1931 if (!Subtarget->hasGFX90AInsts()) {
1932 MIB.addImm(TFE); // tfe
1933 } else if (TFE) {
1934 LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1938 MIB.addImm(LWE); // lwe
1940 MIB.addImm(DimInfo->DA ? -1 : 0);
1941 if (BaseOpcode->HasD16)
1942 MIB.addImm(IsD16 ? -1 : 0);
1945 // An image load instruction with TFE/LWE only conditionally writes to its
1946 // result registers. Initialize them to zero so that we always get well
1947 // defined result values.
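// Sketch of the shape this produces (exact subregisters depend on the
// register class):
//   %tied = REG_SEQUENCE %zero-or-undef, sub0, ..., %zero, subN
//   ... = IMAGE_LOAD ..., implicit %tied (tied to the def)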
1948 assert(VDataOut && !VDataIn);
1949 Register Tied = MRI->cloneVirtualRegister(VDataOut);
1950 Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1951 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1953 auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1954 if (STI.usePRTStrictNull()) {
1955 // With enable-prt-strict-null enabled, initialize all result registers to
1956 // zero.
1957 auto RegSeq =
1958 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1959 for (auto Sub : Parts)
1960 RegSeq.addReg(Zero).addImm(Sub);
1962 // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1963 // result register.
1964 Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1965 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1966 auto RegSeq =
1967 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1968 for (auto Sub : Parts.drop_back(1))
1969 RegSeq.addReg(Undef).addImm(Sub);
1970 RegSeq.addReg(Zero).addImm(Parts.back());
1972 MIB.addReg(Tied, RegState::Implicit);
1973 MIB->tieOperands(0, MIB->getNumOperands() - 1);
1976 MI.eraseFromParent();
1977 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1978 TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1982 // We need to handle this here because tablegen doesn't support matching
1983 // instructions with multiple outputs.
1984 bool AMDGPUInstructionSelector::selectDSBvhStackIntrinsic(
1985 MachineInstr &MI) const {
1986 Register Dst0 = MI.getOperand(0).getReg();
1987 Register Dst1 = MI.getOperand(1).getReg();
1989 const DebugLoc &DL = MI.getDebugLoc();
1990 MachineBasicBlock *MBB = MI.getParent();
1992 Register Addr = MI.getOperand(3).getReg();
1993 Register Data0 = MI.getOperand(4).getReg();
1994 Register Data1 = MI.getOperand(5).getReg();
1995 unsigned Offset = MI.getOperand(6).getImm();
1997 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_BVH_STACK_RTN_B32), Dst0)
2005 MI.eraseFromParent();
2006 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2009 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
2010 MachineInstr &I) const {
2011 unsigned IntrinsicID = I.getIntrinsicID();
2012 switch (IntrinsicID) {
2013 case Intrinsic::amdgcn_end_cf:
2014 return selectEndCfIntrinsic(I);
2015 case Intrinsic::amdgcn_ds_ordered_add:
2016 case Intrinsic::amdgcn_ds_ordered_swap:
2017 return selectDSOrderedIntrinsic(I, IntrinsicID);
2018 case Intrinsic::amdgcn_ds_gws_init:
2019 case Intrinsic::amdgcn_ds_gws_barrier:
2020 case Intrinsic::amdgcn_ds_gws_sema_v:
2021 case Intrinsic::amdgcn_ds_gws_sema_br:
2022 case Intrinsic::amdgcn_ds_gws_sema_p:
2023 case Intrinsic::amdgcn_ds_gws_sema_release_all:
2024 return selectDSGWSIntrinsic(I, IntrinsicID);
2025 case Intrinsic::amdgcn_ds_append:
2026 return selectDSAppendConsume(I, true);
2027 case Intrinsic::amdgcn_ds_consume:
2028 return selectDSAppendConsume(I, false);
2029 case Intrinsic::amdgcn_s_barrier:
2030 return selectSBarrier(I);
2031 case Intrinsic::amdgcn_raw_buffer_load_lds:
2032 case Intrinsic::amdgcn_raw_ptr_buffer_load_lds:
2033 case Intrinsic::amdgcn_struct_buffer_load_lds:
2034 case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
2035 return selectBufferLoadLds(I);
2036 case Intrinsic::amdgcn_global_load_lds:
2037 return selectGlobalLoadLds(I);
2038 case Intrinsic::amdgcn_exp_compr:
2039 if (!STI.hasCompressedExport()) {
2040 Function &F = I.getMF()->getFunction();
2041 DiagnosticInfoUnsupported NoFpRet(
2042 F, "intrinsic not supported on subtarget", I.getDebugLoc(), DS_Error);
2043 F.getContext().diagnose(NoFpRet);
2047 case Intrinsic::amdgcn_ds_bvh_stack_rtn:
2048 return selectDSBvhStackIntrinsic(I);
2050 return selectImpl(I, *CoverageInfo);
2053 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
2054 if (selectImpl(I, *CoverageInfo))
2057 MachineBasicBlock *BB = I.getParent();
2058 const DebugLoc &DL = I.getDebugLoc();
2060 Register DstReg = I.getOperand(0).getReg();
2061 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
2062 assert(Size <= 32 || Size == 64);
2063 const MachineOperand &CCOp = I.getOperand(1);
2064 Register CCReg = CCOp.getReg();
2065 if (!isVCC(CCReg, *MRI)) {
2066 unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
2067 AMDGPU::S_CSELECT_B32;
2068 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
2071 // The generic constrainSelectedInstRegOperands doesn't work for the scc
2072 // register bank, because it does not cover the register class we use to
2073 // represent it. So we need to set the register class manually here.
2074 if (!MRI->getRegClassOrNull(CCReg))
2075 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
2076 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
2077 .add(I.getOperand(2))
2078 .add(I.getOperand(3));
2081 Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
2082 Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
2083 I.eraseFromParent();
2087 // Wide VGPR select should have been split in RegBankSelect.
2091 MachineInstr *Select =
2092 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
2094 .add(I.getOperand(3))
2096 .add(I.getOperand(2))
2097 .add(I.getOperand(1));
2099 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
2100 I.eraseFromParent();
2104 static int sizeToSubRegIndex(unsigned Size) {
2107 return AMDGPU::sub0;
2109 return AMDGPU::sub0_sub1;
2111 return AMDGPU::sub0_sub1_sub2;
2113 return AMDGPU::sub0_sub1_sub2_sub3;
2115 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
2118 return AMDGPU::sub0;
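// E.g. an unlisted size such as 192 rounds up to bit_ceil(192) == 256 and
// recurses to the sub0..sub7 index above.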
2121 return sizeToSubRegIndex(llvm::bit_ceil(Size));
2125 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
2126 Register DstReg = I.getOperand(0).getReg();
2127 Register SrcReg = I.getOperand(1).getReg();
2128 const LLT DstTy = MRI->getType(DstReg);
2129 const LLT SrcTy = MRI->getType(SrcReg);
2130 const LLT S1 = LLT::scalar(1);
2132 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2133 const RegisterBank *DstRB;
2135 // This is a special case. We don't treat s1 for legalization artifacts as
2136 // vcc booleans.
2139 DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2144 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2146 unsigned DstSize = DstTy.getSizeInBits();
2147 unsigned SrcSize = SrcTy.getSizeInBits();
2149 const TargetRegisterClass *SrcRC =
2150 TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
2151 const TargetRegisterClass *DstRC =
2152 TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
2153 if (!SrcRC || !DstRC)
2156 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2157 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
2158 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
2162 if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
2163 MachineBasicBlock *MBB = I.getParent();
2164 const DebugLoc &DL = I.getDebugLoc();
2166 Register LoReg = MRI->createVirtualRegister(DstRC);
2167 Register HiReg = MRI->createVirtualRegister(DstRC);
2168 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
2169 .addReg(SrcReg, 0, AMDGPU::sub0);
2170 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
2171 .addReg(SrcReg, 0, AMDGPU::sub1);
2173 if (IsVALU && STI.hasSDWA()) {
2174 // Write the low 16-bits of the high element into the high 16-bits of the
2175 // low element.
2176 MachineInstr *MovSDWA =
2177 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2178 .addImm(0) // $src0_modifiers
2179 .addReg(HiReg) // $src0
2180 .addImm(0) // $clamp
2181 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel
2182 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2183 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel
2184 .addReg(LoReg, RegState::Implicit);
2185 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2187 Register TmpReg0 = MRI->createVirtualRegister(DstRC);
2188 Register TmpReg1 = MRI->createVirtualRegister(DstRC);
2189 Register ImmReg = MRI->createVirtualRegister(DstRC);
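// Non-SDWA fallback: effectively Dst = (Hi << 16) | (Lo & 0xffff), built
// from a shift, an and with the mask constant, and an or.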
2191 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
2195 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
2200 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2201 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2202 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
2204 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
2206 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
2209 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
2214 I.eraseFromParent();
2218 if (!DstTy.isScalar())
2222 int SubRegIdx = sizeToSubRegIndex(DstSize);
2223 if (SubRegIdx == -1)
2226 // Deal with weird cases where the class only partially supports the subreg
2227 // index.
2228 const TargetRegisterClass *SrcWithSubRC
2229 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2233 if (SrcWithSubRC != SrcRC) {
2234 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2238 I.getOperand(1).setSubReg(SubRegIdx);
2241 I.setDesc(TII.get(TargetOpcode::COPY));
2245 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
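/// E.g. Size == 4 gives 0xf and Size == 32 gives 0xffffffff (signed -1),
/// both inline immediates; Size == 16 gives 0xffff == 65535, which is not.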
2246 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2247 Mask = maskTrailingOnes<unsigned>(Size);
2248 int SignedMask = static_cast<int>(Mask);
2249 return SignedMask >= -16 && SignedMask <= 64;
2252 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2253 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2254 Register Reg, const MachineRegisterInfo &MRI,
2255 const TargetRegisterInfo &TRI) const {
2256 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2257 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2260 // Ignore the type, since we don't use vcc in artifacts.
2261 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2262 return &RBI.getRegBankFromRegClass(*RC, LLT());
2266 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2267 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2268 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2269 const DebugLoc &DL = I.getDebugLoc();
2270 MachineBasicBlock &MBB = *I.getParent();
2271 const Register DstReg = I.getOperand(0).getReg();
2272 const Register SrcReg = I.getOperand(1).getReg();
2274 const LLT DstTy = MRI->getType(DstReg);
2275 const LLT SrcTy = MRI->getType(SrcReg);
2276 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2277 I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2278 const unsigned DstSize = DstTy.getSizeInBits();
2279 if (!DstTy.isScalar())
2282 // Artifact casts should never use vcc.
2283 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2285 // FIXME: This should probably be illegal and split earlier.
2286 if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2288 return selectCOPY(I);
2290 const TargetRegisterClass *SrcRC =
2291 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2292 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2293 const TargetRegisterClass *DstRC =
2294 TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2296 Register UndefReg = MRI->createVirtualRegister(SrcRC);
2297 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2298 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2300 .addImm(AMDGPU::sub0)
2302 .addImm(AMDGPU::sub1);
2303 I.eraseFromParent();
2305 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2306 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2309 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2310 // 64-bit should have been split up in RegBankSelect
2312 // Try to use an and with a mask if it will save code size.
2314 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2315 MachineInstr *ExtI =
2316 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2319 I.eraseFromParent();
2320 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2323 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2324 MachineInstr *ExtI =
2325 BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2327 .addImm(0) // Offset
2328 .addImm(SrcSize); // Width
2329 I.eraseFromParent();
2330 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2333 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2334 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2335 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2336 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2339 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2340 const unsigned SextOpc = SrcSize == 8 ?
2341 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2342 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2344 I.eraseFromParent();
2345 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2348 // Using a single 32-bit SALU to calculate the high half is smaller than
2349 // S_BFE with a literal constant operand.
2350 if (DstSize > 32 && SrcSize == 32) {
2351 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2352 unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister;
2354 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_ASHR_I32), HiReg)
2355 .addReg(SrcReg, 0, SubReg)
2358 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg)
2361 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2362 .addReg(SrcReg, 0, SubReg)
2363 .addImm(AMDGPU::sub0)
2365 .addImm(AMDGPU::sub1);
2366 I.eraseFromParent();
2367 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass,
2371 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2372 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2374 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
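// E.g. a zero-offset field of width W is encoded as the immediate (W << 16),
// which is how the BFE immediates below are built.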
2375 if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2376 // We need a 64-bit register source, but the high bits don't matter.
2377 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2378 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2379 unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister;
2381 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2382 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2383 .addReg(SrcReg, 0, SubReg)
2384 .addImm(AMDGPU::sub0)
2386 .addImm(AMDGPU::sub1);
2388 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2390 .addImm(SrcSize << 16);
2392 I.eraseFromParent();
2393 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2397 if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2398 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2402 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2404 .addImm(SrcSize << 16);
2407 I.eraseFromParent();
2408 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2414 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2415 MachineBasicBlock *BB = I.getParent();
2416 MachineOperand &ImmOp = I.getOperand(1);
2417 Register DstReg = I.getOperand(0).getReg();
2418 unsigned Size = MRI->getType(DstReg).getSizeInBits();
2420 // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2421 if (ImmOp.isFPImm()) {
2422 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2423 ImmOp.ChangeToImmediate(Imm.getZExtValue());
2424 } else if (ImmOp.isCImm()) {
2425 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2427 llvm_unreachable("Not supported by g_constants");
2430 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2431 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2434 if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2435 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2437 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2439 // We should never produce s1 values on banks other than VCC. If the user of
2440 // this already constrained the register, we may incorrectly think it's VCC
2441 // if it wasn't originally.
2447 I.setDesc(TII.get(Opcode));
2448 I.addImplicitDefUseOperands(*MF);
2449 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2452 const DebugLoc &DL = I.getDebugLoc();
2454 APInt Imm(Size, I.getOperand(1).getImm());
2456 MachineInstr *ResInst;
2457 if (IsSgpr && TII.isInlineConstant(Imm)) {
2458 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2459 .addImm(I.getOperand(1).getImm());
2461 const TargetRegisterClass *RC = IsSgpr ?
2462 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2463 Register LoReg = MRI->createVirtualRegister(RC);
2464 Register HiReg = MRI->createVirtualRegister(RC);
2466 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2467 .addImm(Imm.trunc(32).getZExtValue());
2469 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2470 .addImm(Imm.ashr(32).getZExtValue());
2472 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2474 .addImm(AMDGPU::sub0)
2476 .addImm(AMDGPU::sub1);
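// Sketch of the 64-bit split performed here:
//   %lo = *_MOV_B32 imm[31:0]
//   %hi = *_MOV_B32 imm[63:32]
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1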
2479 // We can't call constrainSelectedInstRegOperands here, because it doesn't
2480 // work for target-independent opcodes.
2481 I.eraseFromParent();
2482 const TargetRegisterClass *DstRC =
2483 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2486 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2489 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2490 // Only manually handle the f64 SGPR case.
2492 // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2493 // the bit ops theoretically have a second result due to the implicit def of
2494 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2495 // that is easy by disabling the check. The result works, but uses a
2496 // nonsensical sreg32orlds_and_sreg_1 regclass.
2498 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32
2499 // results to the variadic REG_SEQUENCE operands.
2501 Register Dst = MI.getOperand(0).getReg();
2502 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2503 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2504 MRI->getType(Dst) != LLT::scalar(64))
2507 Register Src = MI.getOperand(1).getReg();
2508 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2510 Src = Fabs->getOperand(1).getReg();
2512 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2513 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2516 MachineBasicBlock *BB = MI.getParent();
2517 const DebugLoc &DL = MI.getDebugLoc();
2518 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2519 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2520 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2521 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2523 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2524 .addReg(Src, 0, AMDGPU::sub0);
2525 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2526 .addReg(Src, 0, AMDGPU::sub1);
2527 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2528 .addImm(0x80000000);
2530 // Set or toggle sign bit.
2531 unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2532 BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2535 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2537 .addImm(AMDGPU::sub0)
2539 .addImm(AMDGPU::sub1);
2540 MI.eraseFromParent();
2544 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2545 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2546 Register Dst = MI.getOperand(0).getReg();
2547 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2548 if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2549 MRI->getType(Dst) != LLT::scalar(64))
2552 Register Src = MI.getOperand(1).getReg();
2553 MachineBasicBlock *BB = MI.getParent();
2554 const DebugLoc &DL = MI.getDebugLoc();
2555 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2556 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2557 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2558 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2560 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2561 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2564 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2565 .addReg(Src, 0, AMDGPU::sub0);
2566 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2567 .addReg(Src, 0, AMDGPU::sub1);
2568 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2569 .addImm(0x7fffffff);
2572 // TODO: Should this use S_BITSET0_*?
2573 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2576 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2578 .addImm(AMDGPU::sub0)
2580 .addImm(AMDGPU::sub1);
2582 MI.eraseFromParent();
2586 static bool isConstant(const MachineInstr &MI) {
2587 return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2590 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2591 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2593 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2597 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2602 for (unsigned i = 1; i != 3; ++i) {
2603 const MachineOperand &GEPOp = PtrMI->getOperand(i);
2604 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2606 if (i == 2 && isConstant(*OpDef)) {
2607 // TODO: Could handle constant base + variable offset, but a combine
2608 // probably should have commuted it.
2609 assert(GEPInfo.Imm == 0);
2610 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2613 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2614 if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2615 GEPInfo.SgprParts.push_back(GEPOp.getReg());
2617 GEPInfo.VgprParts.push_back(GEPOp.getReg());
2620 AddrInfo.push_back(GEPInfo);
2621 getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2624 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2625 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2628 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2629 if (!MI.hasOneMemOperand())
2632 const MachineMemOperand *MMO = *MI.memoperands_begin();
2633 const Value *Ptr = MMO->getValue();
2635 // UndefValue means this is a load of a kernel input. These are uniform.
2636 // Sometimes LDS instructions have constant pointers.
2637 // If Ptr is null, then that means this mem operand contains a
2638 // PseudoSourceValue like GOT.
2639 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2640 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2643 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2646 const Instruction *I = dyn_cast<Instruction>(Ptr);
2647 return I && I->getMetadata("amdgpu.uniform");
2650 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2651 for (const GEPInfo &GEPInfo : AddrInfo) {
2652 if (!GEPInfo.VgprParts.empty())
2658 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2659 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2660 unsigned AS = PtrTy.getAddressSpace();
2661 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2662 STI.ldsRequiresM0Init()) {
2663 MachineBasicBlock *BB = I.getParent();
2665 // If DS instructions require M0 initialization, insert it before selecting.
2666 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2671 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2672 MachineInstr &I) const {
2674 return selectImpl(I, *CoverageInfo);
2677 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2678 if (Reg.isPhysical())
2681 MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2682 const unsigned Opcode = MI.getOpcode();
2684 if (Opcode == AMDGPU::COPY)
2685 return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2687 if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2688 Opcode == AMDGPU::G_XOR)
2689 return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2690 isVCmpResult(MI.getOperand(2).getReg(), MRI);
2692 if (Opcode == TargetOpcode::G_INTRINSIC)
2693 return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2695 return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2698 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2699 MachineBasicBlock *BB = I.getParent();
2700 MachineOperand &CondOp = I.getOperand(0);
2701 Register CondReg = CondOp.getReg();
2702 const DebugLoc &DL = I.getDebugLoc();
2705 Register CondPhysReg;
2706 const TargetRegisterClass *ConstrainRC;
2708 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2709 // whether the branch is uniform when selecting the instruction. In
2710 // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2711 // RegBankSelect knows what it's doing if the branch condition is scc, even
2712 // though it currently does not.
2713 if (!isVCC(CondReg, *MRI)) {
2714 if (MRI->getType(CondReg) != LLT::scalar(32))
2717 CondPhysReg = AMDGPU::SCC;
2718 BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2719 ConstrainRC = &AMDGPU::SReg_32RegClass;
2721 // FIXME: Should scc->vcc copies be ANDed with exec?
2723 // Unless the value of CondReg is the result of a V_CMP* instruction, we
2724 // need to insert an and with exec.
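// E.g. a condition built as (G_ICMP ...) & (G_FCMP ...) is already properly
// masked by exec in every lane, so the extra S_AND below can be skipped.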
2725 if (!isVCmpResult(CondReg, *MRI)) {
2726 const bool Is64 = STI.isWave64();
2727 const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2728 const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2730 Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2731 BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2737 CondPhysReg = TRI.getVCC();
2738 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2739 ConstrainRC = TRI.getBoolRC();
2742 if (!MRI->getRegClassOrNull(CondReg))
2743 MRI->setRegClass(CondReg, ConstrainRC);
2745 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2747 BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2748 .addMBB(I.getOperand(1).getMBB());
2750 I.eraseFromParent();
2754 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2755 MachineInstr &I) const {
2756 Register DstReg = I.getOperand(0).getReg();
2757 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2758 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2759 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2761 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2763 return RBI.constrainGenericRegister(
2764 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2767 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2768 Register DstReg = I.getOperand(0).getReg();
2769 Register SrcReg = I.getOperand(1).getReg();
2770 Register MaskReg = I.getOperand(2).getReg();
2771 LLT Ty = MRI->getType(DstReg);
2772 LLT MaskTy = MRI->getType(MaskReg);
2773 MachineBasicBlock *BB = I.getParent();
2774 const DebugLoc &DL = I.getDebugLoc();
2776 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2777 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2778 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2779 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2780 if (DstRB != SrcRB) // Should only happen for hand written MIR.
2783 // Try to avoid emitting a bit operation when we only need to touch half of
2784 // the 64-bit pointer.
2785 APInt MaskOnes = KB->getKnownOnes(MaskReg).zext(64);
2786 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2787 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2789 const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2790 const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
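// E.g. a pointer-alignment mask of ~0xff (0xffffffffffffff00) has all high
// 32 bits known set, so the high half is a plain copy and only the low half
// needs a real and.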
2792 if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2793 !CanCopyLow32 && !CanCopyHi32) {
2794 auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2797 I.eraseFromParent();
2798 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2801 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2802 const TargetRegisterClass &RegRC
2803 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2805 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2806 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2807 const TargetRegisterClass *MaskRC =
2808 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2810 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2811 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2812 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2815 if (Ty.getSizeInBits() == 32) {
2816 assert(MaskTy.getSizeInBits() == 32 &&
2817 "ptrmask should have been narrowed during legalize");
2819 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2822 I.eraseFromParent();
2826 Register HiReg = MRI->createVirtualRegister(&RegRC);
2827 Register LoReg = MRI->createVirtualRegister(&RegRC);
2829 // Extract the subregisters from the source pointer.
2830 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2831 .addReg(SrcReg, 0, AMDGPU::sub0);
2832 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2833 .addReg(SrcReg, 0, AMDGPU::sub1);
2835 Register MaskedLo, MaskedHi;
2838 // If all the bits in the low half are 1, we only need a copy for it.
2841 // Extract the mask subregister and apply the and.
2842 Register MaskLo = MRI->createVirtualRegister(&RegRC);
2843 MaskedLo = MRI->createVirtualRegister(&RegRC);
2845 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2846 .addReg(MaskReg, 0, AMDGPU::sub0);
2847 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2853 // If all the bits in the high half are 1, we only need a copy for it.
2856 Register MaskHi = MRI->createVirtualRegister(&RegRC);
2857 MaskedHi = MRI->createVirtualRegister(&RegRC);
2859 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2860 .addReg(MaskReg, 0, AMDGPU::sub1);
2861 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2866 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2868 .addImm(AMDGPU::sub0)
2870 .addImm(AMDGPU::sub1);
2871 I.eraseFromParent();
2875 /// Return the register to use for the index value, and the subregister to use
2876 /// for the indirectly accessed register.
2877 static std::pair<Register, unsigned>
2878 computeIndirectRegIndex(MachineRegisterInfo &MRI, const SIRegisterInfo &TRI,
2879 const TargetRegisterClass *SuperRC, Register IdxReg,
2880 unsigned EltSize, GISelKnownBits &KnownBits) {
2881 Register IdxBaseReg;
2884 std::tie(IdxBaseReg, Offset) =
2885 AMDGPU::getBaseWithConstantOffset(MRI, IdxReg, &KnownBits);
2886 if (IdxBaseReg == AMDGPU::NoRegister) {
2887 // This will happen if the index is a known constant. This should ordinarily
2888 // be legalized out, but handle it as a register just in case.
2889 assert(Offset == 0);
2890 IdxBaseReg = IdxReg;
2893 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
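// E.g. for a 128-bit super class with EltSize == 4, SubRegs is
// {sub0, sub1, sub2, sub3}, so a constant offset of 2 selects sub2.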
2895 // Skip out of bounds offsets, or else we would end up using an undefined
2896 // register.
2897 if (static_cast<unsigned>(Offset) >= SubRegs.size())
2898 return std::pair(IdxReg, SubRegs[0]);
2899 return std::pair(IdxBaseReg, SubRegs[Offset]);
2902 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2903 MachineInstr &MI) const {
2904 Register DstReg = MI.getOperand(0).getReg();
2905 Register SrcReg = MI.getOperand(1).getReg();
2906 Register IdxReg = MI.getOperand(2).getReg();
2908 LLT DstTy = MRI->getType(DstReg);
2909 LLT SrcTy = MRI->getType(SrcReg);
2911 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2912 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2913 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2915 // The index must be scalar. If it wasn't, RegBankSelect should have moved this
2916 // into a waterfall loop.
2917 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2920 const TargetRegisterClass *SrcRC =
2921 TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2922 const TargetRegisterClass *DstRC =
2923 TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2924 if (!SrcRC || !DstRC)
2926 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2927 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2928 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2931 MachineBasicBlock *BB = MI.getParent();
2932 const DebugLoc &DL = MI.getDebugLoc();
2933 const bool Is64 = DstTy.getSizeInBits() == 64;
2936 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(
2937 *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *KB);
2939 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2940 if (DstTy.getSizeInBits() != 32 && !Is64)
2943 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2946 unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2947 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2948 .addReg(SrcReg, 0, SubReg)
2949 .addReg(SrcReg, RegState::Implicit);
2950 MI.eraseFromParent();
2954 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2957 if (!STI.useVGPRIndexMode()) {
2958 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2960 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2961 .addReg(SrcReg, 0, SubReg)
2962 .addReg(SrcReg, RegState::Implicit);
2963 MI.eraseFromParent();
2967 const MCInstrDesc &GPRIDXDesc =
2968 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2969 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2974 MI.eraseFromParent();
2978 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2979 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2980 MachineInstr &MI) const {
2981 Register DstReg = MI.getOperand(0).getReg();
2982 Register VecReg = MI.getOperand(1).getReg();
2983 Register ValReg = MI.getOperand(2).getReg();
2984 Register IdxReg = MI.getOperand(3).getReg();
2986 LLT VecTy = MRI->getType(DstReg);
2987 LLT ValTy = MRI->getType(ValReg);
2988 unsigned VecSize = VecTy.getSizeInBits();
2989 unsigned ValSize = ValTy.getSizeInBits();
2991 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2992 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2993 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2995 assert(VecTy.getElementType() == ValTy);
2997 // The index must be scalar. If it wasn't, RegBankSelect should have moved this
2998 // into a waterfall loop.
2999 if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
3002 const TargetRegisterClass *VecRC =
3003 TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
3004 const TargetRegisterClass *ValRC =
3005 TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
3007 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
3008 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
3009 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
3010 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
3013 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
3017 std::tie(IdxReg, SubReg) =
3018 computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, ValSize / 8, *KB);
3020 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
3021 STI.useVGPRIndexMode();
3023 MachineBasicBlock *BB = MI.getParent();
3024 const DebugLoc &DL = MI.getDebugLoc();
3027 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3030 const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
3031 VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
3032 BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
3036 MI.eraseFromParent();
3040 const MCInstrDesc &GPRIDXDesc =
3041 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
3042 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
3048 MI.eraseFromParent();
3052 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3054 unsigned Size = MI.getOperand(3).getImm();
3056 // The struct intrinsic variants add one additional operand over raw.
3057 const bool HasVIndex = MI.getNumOperands() == 9;
3061 VIndex = MI.getOperand(4).getReg();
3065 Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3066 std::optional<ValueAndVReg> MaybeVOffset =
3067 getIConstantVRegValWithLookThrough(VOffset, *MRI);
3068 const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
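// A VOffset known to be the constant 0 can be dropped entirely, which lets
// the OFFSET/IDXEN opcode forms below be used instead of OFFEN/BOTHEN.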
3074 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3075 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3076 : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3077 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3080 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3081 : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3082 : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3083 : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3086 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3087 : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3088 : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3089 : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3093 MachineBasicBlock *MBB = MI.getParent();
3094 const DebugLoc &DL = MI.getDebugLoc();
3095 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3096 .add(MI.getOperand(2));
3098 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3100 if (HasVIndex && HasVOffset) {
3101 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3102 BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3104 .addImm(AMDGPU::sub0)
3106 .addImm(AMDGPU::sub1);
3109 } else if (HasVIndex) {
3111 } else if (HasVOffset) {
3112 MIB.addReg(VOffset);
3115 MIB.add(MI.getOperand(1)); // rsrc
3116 MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3117 MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3118 unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3119 MIB.addImm(Aux & AMDGPU::CPol::ALL); // cpol
3120 MIB.addImm((Aux >> 3) & 1); // swz
3122 MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3123 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3124 LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3125 MachinePointerInfo StorePtrI = LoadPtrI;
3126 StorePtrI.V = nullptr;
3127 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3129 auto F = LoadMMO->getFlags() &
3130 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3131 LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3132 Size, LoadMMO->getBaseAlign());
3134 MachineMemOperand *StoreMMO =
3135 MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3136 sizeof(int32_t), LoadMMO->getBaseAlign());
3138 MIB.setMemRefs({LoadMMO, StoreMMO});
3140 MI.eraseFromParent();
3141 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3144 /// Match a zero extend from a 32-bit value to 64-bits.
3145 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3147 if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3148 return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3150 // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3151 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3152 if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3155 assert(Def->getNumOperands() == 3 &&
3156 MRI.getType(Def->getOperand(0).getReg()) == LLT::scalar(64));
3157 if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3158 return Def->getOperand(1).getReg();
3164 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {
3166 unsigned Size = MI.getOperand(3).getImm();
3172 Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3175 Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3178 Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3182 MachineBasicBlock *MBB = MI.getParent();
3183 const DebugLoc &DL = MI.getDebugLoc();
3184 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3185 .add(MI.getOperand(2));
3187 Register Addr = MI.getOperand(1).getReg();
3189 // Try to split SAddr and VOffset. Global and LDS pointers share the same
3190 // immediate offset, so we cannot use a regular SelectGlobalSAddr().
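// The shape we try to match is roughly:
//   %addr = G_PTR_ADD %sgpr_base, (G_ZEXT s32 %voffset)
// so the SGPR base can go in saddr and the 32-bit part in voffset.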
3191 if (!isSGPR(Addr)) {
3192 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3193 if (isSGPR(AddrDef->Reg)) {
3194 Addr = AddrDef->Reg;
3195 } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3197 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3198 if (isSGPR(SAddr)) {
3199 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3200 if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3209 Opc = AMDGPU::getGlobalSaddrOp(Opc);
3211 VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3212 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3217 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3221 MIB.addReg(VOffset);
3223 MIB.add(MI.getOperand(4)) // offset
3224 .add(MI.getOperand(5)); // cpol
3226 MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3227 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3228 LoadPtrI.Offset = MI.getOperand(4).getImm();
3229 MachinePointerInfo StorePtrI = LoadPtrI;
3230 LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3231 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3232 auto F = LoadMMO->getFlags() &
3233 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3234 LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3235 Size, LoadMMO->getBaseAlign());
3236 MachineMemOperand *StoreMMO =
3237 MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3238 sizeof(int32_t), Align(4));
3240 MIB.setMemRefs({LoadMMO, StoreMMO});
3242 MI.eraseFromParent();
3243 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3246 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3247 MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3248 MI.removeOperand(1);
3249 MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3253 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3255 switch (MI.getIntrinsicID()) {
3256 case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3257 Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3259 case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3260 Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3262 case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3263 Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3265 case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3266 Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3268 case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3269 Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3271 case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3272 Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3274 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
3275 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_BF8_e64;
3277 case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
3278 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_FP8_e64;
3280 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
3281 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_BF8_e64;
3283 case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
3284 Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_FP8_e64;
3286 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
3287 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_BF8_e64;
3289 case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
3290 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_FP8_e64;
3292 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
3293 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_BF8_e64;
3295 case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
3296 Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_FP8_e64;
3299 llvm_unreachable("unhandled smfmac intrinsic");
3302 auto VDst_In = MI.getOperand(4);
3304 MI.setDesc(TII.get(Opc));
3305 MI.removeOperand(4); // VDst_In
3306 MI.removeOperand(1); // Intrinsic ID
3307 MI.addOperand(VDst_In); // Re-add VDst_In to the end
3308 MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3312 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3313 Register DstReg = MI.getOperand(0).getReg();
3314 Register SrcReg = MI.getOperand(1).getReg();
3315 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3316 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3317 MachineBasicBlock *MBB = MI.getParent();
3318 const DebugLoc &DL = MI.getDebugLoc();
3321 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3322 .addImm(Subtarget->getWavefrontSizeLog2())
3325 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3327 .addImm(Subtarget->getWavefrontSizeLog2());
3330 const TargetRegisterClass &RC =
3331 IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3332 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3335 MI.eraseFromParent();
3339 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3341 return selectPHI(I);
3343 if (!I.isPreISelOpcode()) {
3345 return selectCOPY(I);
3349 switch (I.getOpcode()) {
3350 case TargetOpcode::G_AND:
3351 case TargetOpcode::G_OR:
3352 case TargetOpcode::G_XOR:
3353 if (selectImpl(I, *CoverageInfo))
3355 return selectG_AND_OR_XOR(I);
3356 case TargetOpcode::G_ADD:
3357 case TargetOpcode::G_SUB:
3358 if (selectImpl(I, *CoverageInfo))
3360 return selectG_ADD_SUB(I);
3361 case TargetOpcode::G_UADDO:
3362 case TargetOpcode::G_USUBO:
3363 case TargetOpcode::G_UADDE:
3364 case TargetOpcode::G_USUBE:
3365 return selectG_UADDO_USUBO_UADDE_USUBE(I);
3366 case AMDGPU::G_AMDGPU_MAD_U64_U32:
3367 case AMDGPU::G_AMDGPU_MAD_I64_I32:
3368 return selectG_AMDGPU_MAD_64_32(I);
3369 case TargetOpcode::G_INTTOPTR:
3370 case TargetOpcode::G_BITCAST:
3371 case TargetOpcode::G_PTRTOINT:
3372 return selectCOPY(I);
3373 case TargetOpcode::G_CONSTANT:
3374 case TargetOpcode::G_FCONSTANT:
3375 return selectG_CONSTANT(I);
3376 case TargetOpcode::G_FNEG:
3377 if (selectImpl(I, *CoverageInfo))
3379 return selectG_FNEG(I);
3380 case TargetOpcode::G_FABS:
3381 if (selectImpl(I, *CoverageInfo))
3383 return selectG_FABS(I);
3384 case TargetOpcode::G_EXTRACT:
3385 return selectG_EXTRACT(I);
3386 case TargetOpcode::G_MERGE_VALUES:
3387 case TargetOpcode::G_CONCAT_VECTORS:
3388 return selectG_MERGE_VALUES(I);
3389 case TargetOpcode::G_UNMERGE_VALUES:
3390 return selectG_UNMERGE_VALUES(I);
3391 case TargetOpcode::G_BUILD_VECTOR:
3392 case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3393 return selectG_BUILD_VECTOR(I);
3394 case TargetOpcode::G_PTR_ADD:
3395 if (selectImpl(I, *CoverageInfo))
3397 return selectG_PTR_ADD(I);
3398 case TargetOpcode::G_IMPLICIT_DEF:
3399 return selectG_IMPLICIT_DEF(I);
3400 case TargetOpcode::G_FREEZE:
3401 return selectCOPY(I);
3402 case TargetOpcode::G_INSERT:
3403 return selectG_INSERT(I);
3404 case TargetOpcode::G_INTRINSIC:
3405 return selectG_INTRINSIC(I);
3406 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3407 return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3408 case TargetOpcode::G_ICMP:
3409 if (selectG_ICMP(I))
3411 return selectImpl(I, *CoverageInfo);
3412 case TargetOpcode::G_LOAD:
3413 case TargetOpcode::G_STORE:
3414 case TargetOpcode::G_ATOMIC_CMPXCHG:
3415 case TargetOpcode::G_ATOMICRMW_XCHG:
3416 case TargetOpcode::G_ATOMICRMW_ADD:
3417 case TargetOpcode::G_ATOMICRMW_SUB:
3418 case TargetOpcode::G_ATOMICRMW_AND:
3419 case TargetOpcode::G_ATOMICRMW_OR:
3420 case TargetOpcode::G_ATOMICRMW_XOR:
3421 case TargetOpcode::G_ATOMICRMW_MIN:
3422 case TargetOpcode::G_ATOMICRMW_MAX:
3423 case TargetOpcode::G_ATOMICRMW_UMIN:
3424 case TargetOpcode::G_ATOMICRMW_UMAX:
3425 case TargetOpcode::G_ATOMICRMW_UINC_WRAP:
3426 case TargetOpcode::G_ATOMICRMW_UDEC_WRAP:
3427 case TargetOpcode::G_ATOMICRMW_FADD:
3428 case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3429 case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3430 return selectG_LOAD_STORE_ATOMICRMW(I);
3431 case TargetOpcode::G_SELECT:
3432 return selectG_SELECT(I);
3433 case TargetOpcode::G_TRUNC:
3434 return selectG_TRUNC(I);
3435 case TargetOpcode::G_SEXT:
3436 case TargetOpcode::G_ZEXT:
3437 case TargetOpcode::G_ANYEXT:
3438 case TargetOpcode::G_SEXT_INREG:
3439 // This is a workaround. For extension from type i1, `selectImpl()` uses
3440 // patterns from the TD file and generates an illegal VGPR to SGPR COPY, as
3441 // type i1 can only be held in an SGPR class.
3442 if (MRI->getType(I.getOperand(1).getReg()) != LLT::scalar(1) &&
3443 selectImpl(I, *CoverageInfo))
3445 return selectG_SZA_EXT(I);
3446 case TargetOpcode::G_BRCOND:
3447 return selectG_BRCOND(I);
3448 case TargetOpcode::G_GLOBAL_VALUE:
3449 return selectG_GLOBAL_VALUE(I);
3450 case TargetOpcode::G_PTRMASK:
3451 return selectG_PTRMASK(I);
3452 case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3453 return selectG_EXTRACT_VECTOR_ELT(I);
3454 case TargetOpcode::G_INSERT_VECTOR_ELT:
3455 return selectG_INSERT_VECTOR_ELT(I);
3456 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3457 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3458 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3459 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3460 const AMDGPU::ImageDimIntrinsicInfo *Intr
3461 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3462 assert(Intr && "not an image intrinsic with image pseudo");
3463 return selectImageIntrinsic(I, Intr);
3465 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3466 return selectBVHIntrinsic(I);
3467 case AMDGPU::G_SBFX:
3468 case AMDGPU::G_UBFX:
3469 return selectG_SBFX_UBFX(I);
3470 case AMDGPU::G_SI_CALL:
3471 I.setDesc(TII.get(AMDGPU::SI_CALL));
3473 case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3474 return selectWaveAddress(I);
3476 return selectImpl(I, *CoverageInfo);
3481 InstructionSelector::ComplexRendererFns
3482 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3484 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3489 std::pair<Register, unsigned>
3490 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3491 bool IsCanonicalizing,
3492 bool AllowAbs, bool OpSel) const {
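// E.g. a source defined by fneg(fabs(x)) is folded to Src = x with
// Mods == (SISrcMods::NEG | SISrcMods::ABS), when AllowAbs permits it.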
3493 Register Src = Root.getReg();
3495 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3497 if (MI->getOpcode() == AMDGPU::G_FNEG) {
3498 Src = MI->getOperand(1).getReg();
3499 Mods |= SISrcMods::NEG;
3500 MI = getDefIgnoringCopies(Src, *MRI);
3501 } else if (MI->getOpcode() == AMDGPU::G_FSUB && IsCanonicalizing) {
3502 // Fold fsub [+-]0 into fneg. This may not have folded depending on the
3503 // denormal mode, but we're implicitly canonicalizing in a source operand.
3504 const ConstantFP *LHS =
3505 getConstantFPVRegVal(MI->getOperand(1).getReg(), *MRI);
3506 if (LHS && LHS->isZero()) {
3507 Mods |= SISrcMods::NEG;
3508 Src = MI->getOperand(2).getReg();
3512 if (AllowAbs && MI->getOpcode() == AMDGPU::G_FABS) {
3513 Src = MI->getOperand(1).getReg();
3514 Mods |= SISrcMods::ABS;
3518 Mods |= SISrcMods::OP_SEL_0;
3520 return std::pair(Src, Mods);
3523 Register AMDGPUInstructionSelector::copyToVGPRIfSrcFolded(
3524 Register Src, unsigned Mods, MachineOperand Root, MachineInstr *InsertPt,
3525 bool ForceVGPR) const {
3526 if ((Mods != 0 || ForceVGPR) &&
3527 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3529 // If we looked through copies to find source modifiers on an SGPR operand,
3530 // we now have an SGPR register source. To avoid potentially violating the
3531 // constant bus restriction, we need to insert a copy to a VGPR.
3532 Register VGPRSrc = MRI->cloneVirtualRegister(Root.getReg());
3533 BuildMI(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(),
3534 TII.get(AMDGPU::COPY), VGPRSrc)
3543 /// This will select either an SGPR or VGPR operand and will save us from
3544 /// having to write an extra tablegen pattern.
3545 InstructionSelector::ComplexRendererFns
3546 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3548 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  return {{
      [=](MachineInstrBuilder &MIB) {
        MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
                                           /*IsCanonicalizing=*/true,
                                           /*AllowAbs=*/false);

  return {{
      [=](MachineInstrBuilder &MIB) {
        MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  return {{
      [=](MachineInstrBuilder &MIB) {
        MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3ModsNonCanonicalizing(
    MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/false);

  return {{
      [=](MachineInstrBuilder &MIB) {
        MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/true,
                                           /*AllowAbs=*/false);

  return {{
      [=](MachineInstrBuilder &MIB) {
        MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
  Register Reg = Root.getReg();
  const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
  if (Def->getOpcode() == AMDGPU::G_FNEG || Def->getOpcode() == AMDGPU::G_FABS)
    return {};
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PModsImpl(
  Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
  unsigned Mods = 0;
  MachineInstr *MI = MRI.getVRegDef(Src);

  if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
      // It's possible to see an f32 fneg here, but unlikely.
      // TODO: Treat f32 fneg as only high bit.
      MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
    Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
    Src = MI->getOperand(1).getReg();
    MI = MRI.getVRegDef(Src);
  }

  // TODO: Handle G_FSUB 0 as fneg

  // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
  (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()

  // Packed instructions do not have abs modifiers.
  Mods |= SISrcMods::OP_SEL_1;

  return std::pair(Src, Mods);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
  // Literal i1 value set in intrinsic, represents SrcMods for the next operand.
  // Value is in Imm operand as i1 sign extended to int64_t.
  // 1(-1) promotes packed values to signed, 0 treats them as unsigned.
  assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
         "expected i1 value");
  unsigned Mods = SISrcMods::OP_SEL_1;
  if (Root.getImm() == -1)
    Mods ^= SISrcMods::NEG;
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods(
    MachineOperand &Root) const {
  assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
         "expected i1 value");
  unsigned Mods = SISrcMods::OP_SEL_1;
  if (Root.getImm() != 0)
    Mods |= SISrcMods::OP_SEL_0;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  // FIXME: Handle op_sel
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
                                           /*IsCanonicalizing=*/true,
                                           /*AllowAbs=*/false,
                                           /*OpSel=*/false);

  return {{
      [=](MachineInstrBuilder &MIB) {
        MIB.addReg(
            copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true));
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
                                           /*IsCanonicalizing=*/true,
                                           /*AllowAbs=*/false,
                                           /*OpSel=*/true);

  return {{
      [=](MachineInstrBuilder &MIB) {
        MIB.addReg(
            copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true));
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
  }};
}

bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
                                                 Register &Base,
                                                 Register *SOffset,
                                                 int64_t *Offset) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets.
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, *MRI, AddrInfo);

  if (AddrInfo.empty())
    return false;

  const GEPInfo &GEPI = AddrInfo[0];
  std::optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);

  if (SOffset && Offset) {
    if (GEPI.SgprParts.size() == 1 && GEPI.Imm != 0 && EncodedImm &&
        AddrInfo.size() > 1) {
      const GEPInfo &GEPI2 = AddrInfo[1];
      if (GEPI2.SgprParts.size() == 2 && GEPI2.Imm == 0) {
        if (Register OffsetReg =
                matchZeroExtendFromS32(*MRI, GEPI2.SgprParts[1])) {
          Base = GEPI2.SgprParts[0];
          *SOffset = OffsetReg;
          *Offset = *EncodedImm;
          return true;
        }
      }
    }
    return false;
  }

  if (Offset && GEPI.SgprParts.size() == 1 && EncodedImm) {
    Base = GEPI.SgprParts[0];
    *Offset = *EncodedImm;
    return true;
  }

  // SGPR offset is unsigned.
  if (SOffset && GEPI.SgprParts.size() == 1 && isUInt<32>(GEPI.Imm) &&
      GEPI.Imm != 0) {
    // If we make it this far we have a load with a 32-bit immediate offset.
    // It is OK to select this using an SGPR offset, because we have already
    // failed trying to select this load into one of the _IMM variants since
    // the _IMM Patterns are considered before the _SGPR patterns.
    Base = GEPI.SgprParts[0];
    *SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), *SOffset)
        .addImm(GEPI.Imm);
    return true;
  }

  if (SOffset && GEPI.SgprParts.size() && GEPI.Imm == 0) {
    if (Register OffsetReg = matchZeroExtendFromS32(*MRI, GEPI.SgprParts[1])) {
      Base = GEPI.SgprParts[0];
      *SOffset = OffsetReg;
      return true;
    }
  }

  return false;
}

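// Illustrative mapping of the successful cases above (exact mnemonics and
// encodings depend on the subtarget's SMEM support; this is a sketch):
//   base + encoded imm        -> s_load_dword sN, s[B:B+1], 0x100
//   base + 32-bit SGPR offset -> s_load_dword sN, s[B:B+1], sOff
//   base + soffset + imm      -> s_load_dword sN, s[B:B+1], sOff offset:0x100
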
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  Register Base;
  int64_t Offset;
  if (!selectSmrdOffset(Root, Base, /* SOffset= */ nullptr, &Offset))
    return std::nullopt;

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return std::nullopt;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Register PtrReg = GEPInfo.SgprParts[0];
  std::optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
  if (!EncodedImm)
    return std::nullopt;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  Register Base, SOffset;
  if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr))
    return std::nullopt;

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
           [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const {
  Register Base, SOffset;
  int64_t Offset;
  if (!selectSmrdOffset(Root, Base, &SOffset, &Offset))
    return std::nullopt;

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
           [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
}

std::pair<Register, int>
AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
                                                uint64_t FlatVariant) const {
  MachineInstr *MI = Root.getParent();

  auto Default = std::pair(Root.getReg(), 0);

  if (!STI.hasFlatInstOffsets())
    return Default;

  Register PtrBase;
  int64_t ConstOffset;
  std::tie(PtrBase, ConstOffset) =
      getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
  if (ConstOffset == 0 || !isFlatScratchBaseLegal(PtrBase, FlatVariant))
    return Default;

  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
  if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
    return Default;

  return std::pair(PtrBase, ConstOffset);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
    }};
}

// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
  Register Addr = Root.getReg();
  Register PtrBase;
  int64_t ConstOffset;
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);

  if (ConstOffset != 0) {
    if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
                              SIInstrFlags::FlatGlobal)) {
      Addr = PtrBase;
      ImmOffset = ConstOffset;
    } else {
      auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
      if (isSGPR(PtrBaseDef->Reg)) {
        if (ConstOffset > 0) {
          // Offset is too large.
          //
          // saddr + large_offset -> saddr +
          //                         (voffset = large_offset & ~MaxOffset) +
          //                         (large_offset & MaxOffset);
          int64_t SplitImmOffset, RemainderOffset;
          std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
              ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);

          if (isUInt<32>(RemainderOffset)) {
            MachineInstr *MI = Root.getParent();
            MachineBasicBlock *MBB = MI->getParent();
            Register HighBits =
                MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

            BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
                    HighBits)
                .addImm(RemainderOffset);

            return {{
                [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
                [=](MachineInstrBuilder &MIB) {
                  MIB.addReg(HighBits);
                }, // voffset
                [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
            }};
          }
        }

        // We are adding a 64 bit SGPR and a constant. If constant bus limit
        // is 1 we would need to perform 1 or 2 extra moves for each half of
        // the constant and it is better to do a scalar add and then issue a
        // single VALU instruction to materialize zero. Otherwise it is less
        // instructions to perform VALU adds with immediates or inline literals.
        unsigned NumLiterals =
            !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
            !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
        if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
          return std::nullopt;
      }
    }
  }

  // Match the variable offset.
  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
  if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
    // Look through the SGPR->VGPR copy.
    Register SAddr =
        getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);

    if (isSGPR(SAddr)) {
      Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();

      // It's possible voffset is an SGPR here, but the copy to VGPR will be
      // inserted later.
      if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
        return {{[=](MachineInstrBuilder &MIB) { // saddr
                   MIB.addReg(SAddr);
                 },
                 [=](MachineInstrBuilder &MIB) { // voffset
                   MIB.addReg(VOffset);
                 },
                 [=](MachineInstrBuilder &MIB) { // offset
                   MIB.addImm(ImmOffset);
                 }}};
      }
    }
  }

  // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
  // drop this.
  if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
      AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
    return std::nullopt;

  // It's cheaper to materialize a single 32-bit zero for vaddr than the two
  // moves required to copy a 64-bit SGPR to VGPR.
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
      .addImm(0);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
      [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
  }};
}

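// Numeric sketch of the split above (the real bound comes from
// TII.splitFlatOffset): if the legal immediate range ended at 4095, a
// constant offset of 0x11234 would become voffset = 0x11000, materialized
// into a VGPR with V_MOV_B32, plus an instruction offset of 0x234, keeping
// the 64-bit base in SGPRs.
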
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
  Register Addr = Root.getReg();
  Register PtrBase;
  int64_t ConstOffset;
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);

  if (ConstOffset != 0 && isFlatScratchBaseLegal(PtrBase) &&
      TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
                            SIInstrFlags::FlatScratch)) {
    Addr = PtrBase;
    ImmOffset = ConstOffset;
  }

  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
  if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
    int FI = AddrDef->MI->getOperand(1).getIndex();
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
        [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }  // offset
    }};
  }

  Register SAddr = AddrDef->Reg;

  if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
    Register LHS = AddrDef->MI->getOperand(1).getReg();
    Register RHS = AddrDef->MI->getOperand(2).getReg();
    auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
    auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);

    if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
        isSGPR(RHSDef->Reg)) {
      int FI = LHSDef->MI->getOperand(1).getIndex();
      MachineInstr &I = *Root.getParent();
      MachineBasicBlock *BB = I.getParent();
      const DebugLoc &DL = I.getDebugLoc();
      SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
          .addFrameIndex(FI)
          .addReg(RHSDef->Reg);
    }
  }

  if (!isSGPR(SAddr))
    return std::nullopt;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); },    // saddr
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
  }};
}

// Check whether the flat scratch SVS swizzle bug affects this access.
bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
    Register VAddr, Register SAddr, uint64_t ImmOffset) const {
  if (!Subtarget->hasFlatScratchSVSSwizzleBug())
    return false;

  // The bug affects the swizzling of SVS accesses if there is any carry out
  // from the two low order bits (i.e. from bit 1 into bit 2) when adding
  // voffset to (soffset + inst_offset).
  auto VKnown = KB->getKnownBits(VAddr);
  auto SKnown = KnownBits::computeForAddSub(
      /*Add=*/true, /*NSW=*/false, KB->getKnownBits(SAddr),
      KnownBits::makeConstant(APInt(32, ImmOffset)));
  uint64_t VMax = VKnown.getMaxValue().getZExtValue();
  uint64_t SMax = SKnown.getMaxValue().getZExtValue();
  return (VMax & 3) + (SMax & 3) >= 4;
}

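// Worked example: if voffset's known maximum has low two bits 3 and the known
// maximum of (soffset + inst_offset) has low two bits 1, then 3 + 1 = 4
// carries out of bit 1, so the access is conservatively rejected. Only when
// the two low-bit maxima sum to less than 4 is the carry provably absent.
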
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
  Register Addr = Root.getReg();
  Register PtrBase;
  int64_t ConstOffset;
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);

  if (ConstOffset != 0 &&
      TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
    Addr = PtrBase;
    ImmOffset = ConstOffset;
  }

  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
  if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
    return std::nullopt;

  Register RHS = AddrDef->MI->getOperand(2).getReg();
  if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
    return std::nullopt;

  Register LHS = AddrDef->MI->getOperand(1).getReg();
  auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);

  if (!isFlatScratchBaseLegal(LHS) || !isFlatScratchBaseLegal(RHS))
    return std::nullopt;

  if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
    return std::nullopt;

  if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
    int FI = LHSDef->MI->getOperand(1).getIndex();
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); },       // vaddr
        [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
        [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }  // offset
    }};
  }

  if (!isSGPR(LHS))
    return std::nullopt;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); },      // vaddr
      [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); },      // saddr
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  int64_t Offset = 0;
  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
      Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
    Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // TODO: Should this be inside the render function? The iterator seems to
    // move.
    const uint32_t MaxOffset = SIInstrInfo::getMaxMUBUFImmOffset();
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            HighBits)
        .addImm(Offset & ~MaxOffset);

    return {{[=](MachineInstrBuilder &MIB) { // rsrc
               MIB.addReg(Info->getScratchRSrcReg());
             },
             [=](MachineInstrBuilder &MIB) { // vaddr
               MIB.addReg(HighBits);
             },
             [=](MachineInstrBuilder &MIB) { // soffset
               // Use constant zero for soffset and rely on eliminateFrameIndex
               // to choose the appropriate frame register if need be.
               MIB.addImm(0);
             },
             [=](MachineInstrBuilder &MIB) { // offset
               MIB.addImm(Offset & MaxOffset);
             }}};
  }

  assert(Offset == 0 || Offset == -1);

  // Try to fold a frame index directly into the MUBUF vaddr field, and any
  // offsets.
  std::optional<int> FI;
  Register VAddr = Root.getReg();
  if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
    Register PtrBase;
    int64_t ConstOffset;
    std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
    if (ConstOffset != 0) {
      if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
          (!STI.privateMemoryResourceIsRangeChecked() ||
           KB->signBitIsZero(PtrBase))) {
        const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
        if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
          FI = PtrBaseDef->getOperand(1).getIndex();
        else
          VAddr = PtrBase;
        Offset = ConstOffset;
      }
    } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
      FI = RootDef->getOperand(1).getIndex();
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { // rsrc
             MIB.addReg(Info->getScratchRSrcReg());
           },
           [=](MachineInstrBuilder &MIB) { // vaddr
             if (FI)
               MIB.addFrameIndex(*FI);
             else
               MIB.addReg(VAddr);
           },
           [=](MachineInstrBuilder &MIB) { // soffset
             // Use constant zero for soffset and rely on eliminateFrameIndex
             // to choose the appropriate frame register if need be.
             MIB.addImm(0);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(Offset);
           }}};
}

bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
                                                int64_t Offset) const {
  if (!isUInt<16>(Offset))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return KB->signBitIsZero(Base);
}

bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
                                                 int64_t Offset1,
                                                 unsigned Size) const {
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
    return false;
  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return KB->signBitIsZero(Base);
}

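// Example of the encoding limits checked above: for a 4-byte element size
// (e.g. a ds_read2_b32-style access), each offset is an 8-bit element count,
// so legal byte offsets are multiples of 4 up to 255 * 4 = 1020; for
// Size == 8 they are multiples of 8 up to 2040.
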
bool AMDGPUInstructionSelector::isFlatScratchBaseLegal(
    Register Base, uint64_t FlatVariant) const {
  if (FlatVariant != SIInstrFlags::FlatScratch)
    return true;

  // When the value in the 32-bit Base can be negative, calculate the scratch
  // offset using a 32-bit add instruction; otherwise use Base (unsigned) +
  // offset.
  return KB->signBitIsZero(Base);
}

bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
                                                    unsigned ShAmtBits) const {
  assert(MI.getOpcode() == TargetOpcode::G_AND);

  std::optional<APInt> RHS =
      getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
  if (!RHS)
    return false;

  if (RHS->countr_one() >= ShAmtBits)
    return true;

  const APInt &LHSKnownZeros = KB->getKnownZeroes(MI.getOperand(1).getReg());
  return (LHSKnownZeros | *RHS).countr_one() >= ShAmtBits;
}

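// Example: shifts use only the low bits of the shift amount, so for a 32-bit
// shift (ShAmtBits == 5) a mask like (G_AND %amt, 31) is redundant: 31 has
// five trailing ones, and the mask can be skipped when selecting the shift.
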
// Return the wave-level SGPR base address if this is a wave address.
static Register getWaveAddress(const MachineInstr *Def) {
  return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
             ? Def->getOperand(1).getReg()
             : Register();
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {
  Register Reg = Root.getReg();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  const MachineInstr *Def = MRI->getVRegDef(Reg);
  if (Register WaveBase = getWaveAddress(Def)) {
    return {{
        [=](MachineInstrBuilder &MIB) { // rsrc
          MIB.addReg(Info->getScratchRSrcReg());
        },
        [=](MachineInstrBuilder &MIB) { // soffset
          MIB.addReg(WaveBase);
        },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
    }};
  }

  int64_t Offset = 0;

  // FIXME: Copy check is a hack
  Register BasePtr;
  if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
    if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
      return {};
    const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
    Register WaveBase = getWaveAddress(BasePtrDef);
    if (!WaveBase)
      return {};

    return {{
        [=](MachineInstrBuilder &MIB) { // rsrc
          MIB.addReg(Info->getScratchRSrcReg());
        },
        [=](MachineInstrBuilder &MIB) { // soffset
          MIB.addReg(WaveBase);
        },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
    }};
  }

  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
      !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(Info->getScratchRSrcReg());
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
      getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    if (isDSOffsetLegal(PtrBase, Offset)) {
      // (add n0, c0)
      return std::pair(PtrBase, Offset);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO

  }

  return std::pair(Root.getReg(), 0);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 4);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 8);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
                                              unsigned Size) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
    }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
                                                  unsigned Size) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
      getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    int64_t OffsetValue0 = Offset;
    int64_t OffsetValue1 = Offset + Size;
    if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
      // (add n0, c0)
      return std::pair(PtrBase, OffsetValue0 / Size);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO

  }

  return std::pair(Root.getReg(), 0);
}

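// Putting the impl and renderer together: for a base + 8 access with
// Size == 4, this returns {base, 2}, and selectDSReadWrite2 renders
// offset0 = 2 and offset1 = 3, i.e. the two dwords at base + 8 and base + 12.
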
/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
/// the base value with the constant offset. There may be intervening copies
/// between \p Root and the identified constant. Returns \p Root, 0 if this
/// does not match the pattern.
std::pair<Register, int64_t>
AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
    Register Root, const MachineRegisterInfo &MRI) const {
  MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
  if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return {Root, 0};

  MachineOperand &RHS = RootI->getOperand(2);
  std::optional<ValueAndVReg> MaybeOffset =
      getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
  if (!MaybeOffset)
    return {Root, 0};
  return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
}

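// Example: for
//   %c:_(s64) = G_CONSTANT i64 16
//   %p:_(p1) = G_PTR_ADD %base, %c
// this returns {%base, 16}. Copies between the root, the add, and the
// constant are looked through; anything else yields {Root, 0}.
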
static void addZeroImm(MachineInstrBuilder &MIB) {
  MIB.addImm(0);
}

/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If
/// \p BasePtr is not valid, a null base pointer will be used.
static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                          uint32_t FormatLo, uint32_t FormatHi,
                          Register BasePtr) {
  Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);

  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc2)
    .addImm(FormatLo);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc3)
    .addImm(FormatHi);

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrcHi)
    .addReg(RSrc2)
    .addImm(AMDGPU::sub0)
    .addReg(RSrc3)
    .addImm(AMDGPU::sub1);

  Register RSrcLo = BasePtr;
  if (!BasePtr) {
    RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    B.buildInstr(AMDGPU::S_MOV_B64)
      .addDef(RSrcLo)
      .addImm(0);
  }

  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrc)
    .addReg(RSrcLo)
    .addImm(AMDGPU::sub0_sub1)
    .addReg(RSrcHi)
    .addImm(AMDGPU::sub2_sub3);

  return RSrc;
}

static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
}

static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
}

AMDGPUInstructionSelector::MUBUFAddressData
AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
  MUBUFAddressData Data;
  Data.N0 = Src;

  Register PtrBase;
  int64_t Offset;

  std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
  if (isUInt<32>(Offset)) {
    Data.N0 = PtrBase;
    Data.Offset = Offset;
  }

  if (MachineInstr *InputAdd
      = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
    Data.N2 = InputAdd->getOperand(1).getReg();
    Data.N3 = InputAdd->getOperand(2).getReg();

    // FIXME: Need to fix extra SGPR->VGPR copies inserted
    // FIXME: Don't know that this was defined by operand 0
    //
    // TODO: Remove this when we have copy folding optimizations after
    // RegBankSelect.
    Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
    Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
  }

  return Data;
}

/// Return whether the addr64 mubuf mode should be used for the given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
  // (ptr_add N2, N3) -> addr64, or
  // (ptr_add (ptr_add N2, N3), C1) -> addr64
  if (Addr.N2)
    return true;

  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
}

/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
    MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
    return;

  // Illegal offset, store it in soffset.
  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(SOffset)
    .addImm(ImmOffset);
  ImmOffset = 0;
}

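// Example: on targets where the MUBUF immediate field holds offsets up to
// 4095 (the check is SIInstrInfo::isLegalMUBUFImmOffset), an offset of 4100
// does not fit and is instead moved whole into an SGPR with S_MOV_B32,
// leaving ImmOffset == 0.
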
bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
  MachineOperand &Root, Register &VAddr, Register &RSrcReg,
  Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // addr64 bit was removed for volcanic islands.
  if (!STI.hasAddr64() || STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (!shouldUseAddr64(AddrData))
    return false;

  Register N0 = AddrData.N0;
  Register N2 = AddrData.N2;
  Register N3 = AddrData.N3;
  Offset = AddrData.Offset;

  // Base pointer for the SRD.
  Register SRDPtr;

  if (N2) {
    if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
      assert(N3);
      if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the default resource from a 0 address.
        VAddr = N0;
      } else {
        SRDPtr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      SRDPtr = N2;
      VAddr = N3;
    }
  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
    // Use the default null pointer in the resource
    VAddr = N0;
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    SRDPtr = N0;
  }

  MachineIRBuilder B(*Root.getParent());
  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
    MachineOperand &Root, Register &RSrcReg, Register &SOffset,
    int64_t &Offset) const {

  // FIXME: Pattern should not reach here.
  if (STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (shouldUseAddr64(AddrData))
    return false;

  // N0 -> offset, or
  // (N0 + C1) -> offset
  Register SRDPtr = AddrData.N0;
  Offset = AddrData.Offset;

  // TODO: Look through extensions for 32-bit soffset.
  MachineIRBuilder B(*Root.getParent());

  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm, //  cpol
      addZeroImm, //  tfe
      addZeroImm  //  swz
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm, //  cpol
      addZeroImm, //  tfe
      addZeroImm  //  swz
  }};
}

/// Get an immediate that must be 32 bits, and is treated as zero extended.
static std::optional<uint64_t>
getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) {
  // getIConstantVRegVal sexts any values, so see if that matters.
  std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return std::nullopt;
  return Lo_32(*OffsetVal);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  std::optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  std::optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
  // Match the (soffset + offset) pair as a 32-bit register base and
  // an immediate offset.
  Register SOffset;
  unsigned Offset;
  std::tie(SOffset, Offset) =
      AMDGPU::getBaseWithConstantOffset(*MRI, Root.getReg(), KB);
  if (!SOffset)
    return std::nullopt;

  std::optional<int64_t> EncodedOffset =
      AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true);
  if (!EncodedOffset)
    return std::nullopt;

  assert(MRI->getType(SOffset) == LLT::scalar(32));
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedOffset); }}};
}

// Variant of stripBitCast that returns the instruction instead of a
// MachineOperand.
static MachineInstr *stripBitCast(MachineInstr *MI, MachineRegisterInfo &MRI) {
  if (MI->getOpcode() == AMDGPU::G_BITCAST)
    return getDefIgnoringCopies(MI->getOperand(1).getReg(), MRI);
  return MI;
}

// Figure out if this is really an extract of the high 16-bits of a dword,
// returns nullptr if it isn't.
static MachineInstr *isExtractHiElt(MachineInstr *Inst,
                                    MachineRegisterInfo &MRI) {
  Inst = stripBitCast(Inst, MRI);

  if (Inst->getOpcode() != AMDGPU::G_TRUNC)
    return nullptr;

  MachineInstr *TruncOp =
      getDefIgnoringCopies(Inst->getOperand(1).getReg(), MRI);
  TruncOp = stripBitCast(TruncOp, MRI);

  // G_LSHR x, (G_CONSTANT i32 16)
  if (TruncOp->getOpcode() == AMDGPU::G_LSHR) {
    auto SrlAmount = getIConstantVRegValWithLookThrough(
        TruncOp->getOperand(2).getReg(), MRI);
    if (SrlAmount && SrlAmount->Value.getZExtValue() == 16) {
      MachineInstr *SrlOp =
          getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
      return stripBitCast(SrlOp, MRI);
    }
    return nullptr;
  }

  // G_SHUFFLE_VECTOR x, y, shufflemask(1, 1|0)
  // 1, 0 swaps the low/high 16 bits.
  // 1, 1 sets the high 16 bits to be the same as the low 16.
  // In any case, it selects the high elts.
  if (TruncOp->getOpcode() == AMDGPU::G_SHUFFLE_VECTOR) {
    assert(MRI.getType(TruncOp->getOperand(0).getReg()) ==
           LLT::fixed_vector(2, 16));

    ArrayRef<int> Mask = TruncOp->getOperand(3).getShuffleMask();
    assert(Mask.size() == 2);

    if (Mask[0] == 1 && Mask[1] <= 1) {
      MachineInstr *LHS =
          getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
      return stripBitCast(LHS, MRI);
    }
  }

  return nullptr;
}

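// Example: both of the following match as an extract of the high half:
//   %t:_(s16) = G_TRUNC (G_LSHR %x:_(s32), 16)
//   %t:_(s16) = G_TRUNC (G_BITCAST (G_SHUFFLE_VECTOR %v, %u, mask(1,1)))
// and the returned instruction is the def of the value whose high 16 bits
// are being read.
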
std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
                                                     bool &Matched) const {
  Matched = false;

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);

  MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
  if (MI->getOpcode() == AMDGPU::G_FPEXT) {
    MachineOperand *MO = &MI->getOperand(1);
    Src = MO->getReg();
    MI = getDefIgnoringCopies(Src, *MRI);

    assert(MRI->getType(Src) == LLT::scalar(16));

    // See through bitcasts.
    // FIXME: Would be nice to use stripBitCast here.
    if (MI->getOpcode() == AMDGPU::G_BITCAST) {
      MO = &MI->getOperand(1);
      Src = MO->getReg();
      MI = getDefIgnoringCopies(Src, *MRI);
    }

    const auto CheckAbsNeg = [&]() {
      // Be careful about folding modifiers if we already have an abs. fneg is
      // applied last, so we don't want to apply an earlier fneg.
      if ((Mods & SISrcMods::ABS) == 0) {
        unsigned ModsTmp;
        std::tie(Src, ModsTmp) = selectVOP3ModsImpl(*MO);
        MI = getDefIgnoringCopies(Src, *MRI);

        if ((ModsTmp & SISrcMods::NEG) != 0)
          Mods ^= SISrcMods::NEG;

        if ((ModsTmp & SISrcMods::ABS) != 0)
          Mods |= SISrcMods::ABS;
      }
    };

    CheckAbsNeg();

    // op_sel/op_sel_hi decide the source type and source.
    // If the source's op_sel_hi is set, it indicates to do a conversion from
    // fp16. If the source's op_sel is set, it picks the high half of the
    // source register.

    Mods |= SISrcMods::OP_SEL_1;

    if (MachineInstr *ExtractHiEltMI = isExtractHiElt(MI, *MRI)) {
      Mods |= SISrcMods::OP_SEL_0;
      MI = ExtractHiEltMI;
      MO = &MI->getOperand(0);
      Src = MO->getReg();

      CheckAbsNeg();
    }

    Matched = true;
  }

  return {Src, Mods};
}

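// Example: for a mad-mix operand like
//   %h:_(s16) = <high half of a 32-bit value>
//   %f:_(s32) = G_FPEXT %h
// the returned mods have OP_SEL_1 set (the source is f16) and OP_SEL_0 set
// when the f16 value is the high half of its register, with NEG/ABS folded
// from surrounding G_FNEG/G_FABS as above; Matched reports whether the
// G_FPEXT form was seen at all.
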
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMadMixModsExt(
    MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  bool Matched;
  std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched);
  if (!Matched)
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMadMixMods(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  bool Matched;
  std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().popcount());
}

/// This only really exists to satisfy DAG type checking machinery, so is a
/// no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

void AMDGPUInstructionSelector::renderOpSelTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() ? (int64_t)SISrcMods::OP_SEL_0 : 0);
}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}

void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex(MI.getOperand(1).getIndex());
}

bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}