//===-- SIPeepholeSDWA.cpp - Peephole optimization for SDWA instructions --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This pass tries to apply several peephole SDWA patterns.
///
/// E.g. original:
///   V_LSHRREV_B32_e32 %vreg0, 16, %vreg1
///   V_ADD_I32_e32 %vreg2, %vreg0, %vreg3
///   V_LSHLREV_B32_e32 %vreg4, 16, %vreg2
///
/// Replace with:
///   V_ADD_I32_sdwa %vreg4, %vreg1, %vreg3
///       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
///
//===----------------------------------------------------------------------===//

#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include <unordered_map>
#include <unordered_set>

using namespace llvm;

#define DEBUG_TYPE "si-peephole-sdwa"

STATISTIC(NumSDWAPatternsFound, "Number of SDWA patterns found.");
STATISTIC(NumSDWAInstructionsPeepholed,
          "Number of instructions converted to SDWA.");

namespace {

class SDWAOperand;

class SIPeepholeSDWA : public MachineFunctionPass {
public:
  typedef SmallVector<SDWAOperand *, 4> SDWAOperandsVector;

private:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  std::unordered_map<MachineInstr *, std::unique_ptr<SDWAOperand>> SDWAOperands;
  std::unordered_map<MachineInstr *, SDWAOperandsVector> PotentialMatches;
  SmallVector<MachineInstr *, 8> ConvertedInstructions;

  Optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
  static char ID;

  SIPeepholeSDWA() : MachineFunctionPass(ID) {
    initializeSIPeepholeSDWAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void matchSDWAOperands(MachineFunction &MF);
  bool isConvertibleToSDWA(const MachineInstr &MI, const SISubtarget &ST) const;
  bool convertToSDWA(MachineInstr &MI, const SDWAOperandsVector &SDWAOperands);
  void legalizeScalarOperands(MachineInstr &MI, const SISubtarget &ST) const;

  StringRef getPassName() const override { return "SI Peephole SDWA"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

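// Base class for a matched SDWA pattern. Target is the operand that will be
// moved into the converted SDWA instruction; Replaced is the operand of the
// potential instruction that Target replaces there.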
class SDWAOperand {
private:
  MachineOperand *Target; // Operand that would be used in converted instruction
  MachineOperand *Replaced; // Operand that would be replaced by Target

public:
  SDWAOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp)
      : Target(TargetOp), Replaced(ReplacedOp) {
    assert(Target->isReg());
    assert(Replaced->isReg());
  }

  virtual ~SDWAOperand() {}

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) = 0;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) = 0;

  MachineOperand *getTargetOperand() const { return Target; }
  MachineOperand *getReplacedOperand() const { return Replaced; }
  MachineInstr *getParentInst() const { return Target->getParent(); }
  MachineRegisterInfo *getMRI() const {
    return &getParentInst()->getParent()->getParent()->getRegInfo();
  }
};

using namespace AMDGPU::SDWA;

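// An SDWA operand that becomes a source of the converted instruction: it
// selects which byte/word of the register is read (src_sel) and carries
// optional abs/neg/sext source modifiers.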
class SDWASrcOperand : public SDWAOperand {
private:
  SdwaSel SrcSel;
  bool Abs;
  bool Neg;
  bool Sext;

public:
  SDWASrcOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel SrcSel_ = DWORD, bool Abs_ = false, bool Neg_ = false,
                 bool Sext_ = false)
      : SDWAOperand(TargetOp, ReplacedOp), SrcSel(SrcSel_), Abs(Abs_),
        Neg(Neg_), Sext(Sext_) {}

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getSrcSel() const { return SrcSel; }
  bool getAbs() const { return Abs; }
  bool getNeg() const { return Neg; }
  bool getSext() const { return Sext; }

  uint64_t getSrcMods(const SIInstrInfo *TII,
                      const MachineOperand *SrcOp) const;
};

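// An SDWA operand that becomes the destination of the converted instruction:
// it selects which byte/word of vdst is written (dst_sel) and how the unused
// part of the destination is handled (dst_unused).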
class SDWADstOperand : public SDWAOperand {
private:
  SdwaSel DstSel;
  DstUnused DstUn;

public:
  SDWADstOperand(MachineOperand *TargetOp, MachineOperand *ReplacedOp,
                 SdwaSel DstSel_ = DWORD, DstUnused DstUn_ = UNUSED_PAD)
      : SDWAOperand(TargetOp, ReplacedOp), DstSel(DstSel_), DstUn(DstUn_) {}

  virtual MachineInstr *potentialToConvert(const SIInstrInfo *TII) override;
  virtual bool convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) override;

  SdwaSel getDstSel() const { return DstSel; }
  DstUnused getDstUnused() const { return DstUn; }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIPeepholeSDWA, DEBUG_TYPE, "SI Peephole SDWA", false, false)

char SIPeepholeSDWA::ID = 0;

char &llvm::SIPeepholeSDWAID = SIPeepholeSDWA::ID;

FunctionPass *llvm::createSIPeepholeSDWAPass() {
  return new SIPeepholeSDWA();
}

static raw_ostream& operator<<(raw_ostream &OS, const SdwaSel &Sel) {
  switch(Sel) {
  case BYTE_0: OS << "BYTE_0"; break;
  case BYTE_1: OS << "BYTE_1"; break;
  case BYTE_2: OS << "BYTE_2"; break;
  case BYTE_3: OS << "BYTE_3"; break;
  case WORD_0: OS << "WORD_0"; break;
  case WORD_1: OS << "WORD_1"; break;
  case DWORD: OS << "DWORD"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const DstUnused &Un) {
  switch(Un) {
  case UNUSED_PAD: OS << "UNUSED_PAD"; break;
  case UNUSED_SEXT: OS << "UNUSED_SEXT"; break;
  case UNUSED_PRESERVE: OS << "UNUSED_PRESERVE"; break;
  }
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const SDWASrcOperand &Src) {
  OS << "SDWA src: " << *Src.getTargetOperand()
     << " src_sel:" << Src.getSrcSel()
     << " abs:" << Src.getAbs() << " neg:" << Src.getNeg()
     << " sext:" << Src.getSext() << '\n';
  return OS;
}

static raw_ostream& operator<<(raw_ostream &OS, const SDWADstOperand &Dst) {
  OS << "SDWA dst: " << *Dst.getTargetOperand()
     << " dst_sel:" << Dst.getDstSel()
     << " dst_unused:" << Dst.getDstUnused() << '\n';
  return OS;
}

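// Copy the register number, subregister index and register flags (undef, and
// kill for uses / dead for defs) from one machine operand to another.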
static void copyRegOperand(MachineOperand &To, const MachineOperand &From) {
  assert(To.isReg() && From.isReg());
  To.setReg(From.getReg());
  To.setSubReg(From.getSubReg());
  To.setIsUndef(From.isUndef());
  if (To.isUse()) {
    To.setIsKill(From.isKill());
  } else {
    To.setIsDead(From.isDead());
  }
}

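// Two operands refer to the same register iff both are register operands with
// equal register and subregister indices.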
static bool isSameReg(const MachineOperand &LHS, const MachineOperand &RHS) {
  return LHS.isReg() &&
         RHS.isReg() &&
         LHS.getReg() == RHS.getReg() &&
         LHS.getSubReg() == RHS.getSubReg();
}

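// Return true if SubReg accesses a subset (possibly all) of the lanes that
// SuperReg accesses on the same register, judged by subregister lane masks.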
static bool isSubregOf(const MachineOperand &SubReg,
                       const MachineOperand &SuperReg,
                       const TargetRegisterInfo *TRI) {
  if (!SuperReg.isReg() || !SubReg.isReg())
    return false;

  if (isSameReg(SuperReg, SubReg))
    return true;

  if (SuperReg.getReg() != SubReg.getReg())
    return false;

  LaneBitmask SuperMask = TRI->getSubRegIndexLaneMask(SuperReg.getSubReg());
  LaneBitmask SubMask = TRI->getSubRegIndexLaneMask(SubReg.getSubReg());
  SuperMask |= ~SubMask;
  return SuperMask.all();
}

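// Combine the modifiers implied by this SDWA operand (abs/neg or sext) with
// the src modifiers already present on the matched source operand of MI.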
uint64_t SDWASrcOperand::getSrcMods(const SIInstrInfo *TII,
                                    const MachineOperand *SrcOp) const {
  uint64_t Mods = 0;
  const auto *MI = SrcOp->getParent();
  if (TII->getNamedOperand(*MI, AMDGPU::OpName::src0) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      Mods = Mod->getImm();
    }
  } else if (TII->getNamedOperand(*MI, AMDGPU::OpName::src1) == SrcOp) {
    if (auto *Mod = TII->getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers)) {
      Mods = Mod->getImm();
    }
  }
  if (Abs || Neg) {
    assert(!Sext &&
           "Float and integer src modifiers can't be set simultaneously");
    Mods |= Abs ? SISrcMods::ABS : 0;
    Mods ^= Neg ? SISrcMods::NEG : 0;
  } else if (Sext) {
    Mods |= SISrcMods::SEXT;
  }

  return Mods;
}

MachineInstr *SDWASrcOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA src operand the potential instruction is the one that uses
  // the register defined by the parent instruction.
  MachineRegisterInfo *MRI = getMRI();
  MachineOperand *Replaced = getReplacedOperand();
  assert(Replaced->isReg());

  MachineInstr *PotentialMI = nullptr;
  for (MachineOperand &PotentialMO : MRI->use_operands(Replaced->getReg())) {
    // If this is a use of another subreg of the dst reg then do nothing.
    if (!isSubregOf(*Replaced, PotentialMO, MRI->getTargetRegisterInfo()))
      continue;

    // If there exists a use of a superreg of dst then we should not combine
    // this operand into an SDWA src.
    if (!isSameReg(PotentialMO, *Replaced))
      return nullptr;

    // Check that PotentialMI is the only instruction that uses the dst reg.
    if (PotentialMI == nullptr) {
      PotentialMI = PotentialMO.getParent();
    } else if (PotentialMI != PotentialMO.getParent()) {
      return nullptr;
    }
  }

  return PotentialMI;
}

bool SDWASrcOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Find the operand in MI that matches the replaced operand and rewrite it
  // to the target operand. Also set the corresponding src_sel.
  MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand *SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src0_sel);
  MachineOperand *SrcMods =
      TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  assert(Src && (Src->isReg() || Src->isImm()));
  if (!isSameReg(*Src, *getReplacedOperand())) {
    // If this is not src0 then it should be src1.
    Src = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    SrcSel = TII->getNamedOperand(MI, AMDGPU::OpName::src1_sel);
    SrcMods = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

    assert(Src && Src->isReg());

    if ((MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
         MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
        !isSameReg(*Src, *getReplacedOperand())) {
      // In case of v_mac_f16/32_sdwa this pass can try to apply the src
      // operand to src2, which is not allowed.
      return false;
    }

    assert(isSameReg(*Src, *getReplacedOperand()) && SrcSel && SrcMods);
  }
  copyRegOperand(*Src, *getTargetOperand());
  SrcSel->setImm(getSrcSel());
  SrcMods->setImm(getSrcMods(TII, Src));
  getTargetOperand()->setIsKill(false);
  return true;
}

MachineInstr *SDWADstOperand::potentialToConvert(const SIInstrInfo *TII) {
  // For an SDWA dst operand the potential instruction is the one that defines
  // the register that this operand uses.
  MachineRegisterInfo *MRI = getMRI();
  MachineInstr *ParentMI = getParentInst();
  MachineOperand *Replaced = getReplacedOperand();
  assert(Replaced->isReg());

  for (MachineOperand &PotentialMO : MRI->def_operands(Replaced->getReg())) {
    if (!isSubregOf(*Replaced, PotentialMO, MRI->getTargetRegisterInfo()))
      continue;

    if (!isSameReg(*Replaced, PotentialMO))
      return nullptr;

    // Check that ParentMI is the only instruction that uses the replaced
    // register.
    for (MachineOperand &UseMO : MRI->use_operands(PotentialMO.getReg())) {
      if (isSubregOf(UseMO, PotentialMO, MRI->getTargetRegisterInfo()) &&
          UseMO.getParent() != ParentMI) {
        return nullptr;
      }
    }

    // Due to SSA this should be the only def of the replaced register, so
    // return it.
    return PotentialMO.getParent();
  }

  return nullptr;
}

bool SDWADstOperand::convertToSDWA(MachineInstr &MI, const SIInstrInfo *TII) {
  // Replace the vdst operand in MI with the target operand. Set dst_sel and
  // dst_unused.
  if ((MI.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
       MI.getOpcode() == AMDGPU::V_MAC_F32_sdwa) &&
      getDstSel() != AMDGPU::SDWA::DWORD) {
    // v_mac_f16/32_sdwa allow dst_sel to be equal only to DWORD.
    return false;
  }

  MachineOperand *Operand = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  assert(Operand && Operand->isReg() &&
         isSameReg(*Operand, *getReplacedOperand()));
  copyRegOperand(*Operand, *getTargetOperand());
  MachineOperand *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel);
  assert(DstSel);
  DstSel->setImm(getDstSel());
  MachineOperand *DstUnused = TII->getNamedOperand(MI, AMDGPU::OpName::dst_unused);
  assert(DstUnused);
  DstUnused->setImm(getDstUnused());

  // Remove the original instruction because it would conflict with our new
  // instruction by register definition.
  getParentInst()->eraseFromParent();
  return true;
}

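// Try to reduce Op to an immediate: either Op is already an immediate, or it
// is a register whose definition is a foldable copy of an immediate (e.g. a
// S_MOV_B32 of a constant).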
Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
  if (Op.isImm())
    return Op.getImm();

  // If this is not an immediate then it can be a copy of an immediate value,
  // e.g.: %vreg1<def> = S_MOV_B32 255;
  if (Op.isReg()) {
    for (const MachineOperand &Def : MRI->def_operands(Op.getReg())) {
      if (!isSameReg(Op, Def))
        continue;
      const MachineInstr *DefInst = Def.getParent();
      if (!TII->isFoldableCopy(*DefInst))
        return None;
      const MachineOperand &Copied = DefInst->getOperand(1);
      if (!Copied.isImm())
        return None;
      return Copied.getImm();
    }
  }

  return None;
}

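// Scan MF for shift/bfe/and instructions that can be reinterpreted as SDWA
// src or dst operands. A sketch of the conversions performed by the cases
// below:
//   V_LSHRREV_B32_e32 %vreg1, 16, %vreg0      --> SDWA src:%vreg0 src_sel:WORD_1
//   V_LSHLREV_B32_e32 %vreg1, 16, %vreg0      --> SDWA dst:%vreg1 dst_sel:WORD_1
//   V_BFE_U32 %vreg1, %vreg0, 8, 8            --> SDWA src:%vreg0 src_sel:BYTE_1
//   V_AND_B32_e32 %vreg1, 0x0000ffff, %vreg0  --> SDWA src:%vreg0 src_sel:WORD_0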
void SIPeepholeSDWA::matchSDWAOperands(MachineFunction &MF) {
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      unsigned Opcode = MI.getOpcode();
      switch (Opcode) {
      case AMDGPU::V_LSHRREV_B32_e32:
      case AMDGPU::V_ASHRREV_I32_e32:
      case AMDGPU::V_LSHLREV_B32_e32:
      case AMDGPU::V_LSHRREV_B32_e64:
      case AMDGPU::V_ASHRREV_I32_e64:
      case AMDGPU::V_LSHLREV_B32_e64: {
        // from: v_lshrrev_b32_e32 v1, 16/24, v0
        // to SDWA src:v0 src_sel:WORD_1/BYTE_3
        //
        // from: v_ashrrev_i32_e32 v1, 16/24, v0
        // to SDWA src:v0 src_sel:WORD_1/BYTE_3 sext:1
        //
        // from: v_lshlrev_b32_e32 v1, 16/24, v0
        // to SDWA dst:v1 dst_sel:WORD_1/BYTE_3 dst_unused:UNUSED_PAD
        MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        auto Imm = foldToImm(*Src0);
        if (!Imm)
          break;

        if (*Imm != 16 && *Imm != 24)
          break;

        MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
        if (TRI->isPhysicalRegister(Src1->getReg()) ||
            TRI->isPhysicalRegister(Dst->getReg()))
          break;

        if (Opcode == AMDGPU::V_LSHLREV_B32_e32 ||
            Opcode == AMDGPU::V_LSHLREV_B32_e64) {
          auto SDWADst = make_unique<SDWADstOperand>(
            Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);
          DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWADst << '\n');
          SDWAOperands[&MI] = std::move(SDWADst);
          ++NumSDWAPatternsFound;
        } else {
          auto SDWASrc = make_unique<SDWASrcOperand>(
            Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,
            Opcode != AMDGPU::V_LSHRREV_B32_e32 &&
            Opcode != AMDGPU::V_LSHRREV_B32_e64);
          DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWASrc << '\n');
          SDWAOperands[&MI] = std::move(SDWASrc);
          ++NumSDWAPatternsFound;
        }
        break;
      }

      case AMDGPU::V_LSHRREV_B16_e32:
      case AMDGPU::V_ASHRREV_I16_e32:
      case AMDGPU::V_LSHLREV_B16_e32:
      case AMDGPU::V_LSHRREV_B16_e64:
      case AMDGPU::V_ASHRREV_I16_e64:
      case AMDGPU::V_LSHLREV_B16_e64: {
        // from: v_lshrrev_b16_e32 v1, 8, v0
        // to SDWA src:v0 src_sel:BYTE_1
        //
        // from: v_ashrrev_i16_e32 v1, 8, v0
        // to SDWA src:v0 src_sel:BYTE_1 sext:1
        //
        // from: v_lshlrev_b16_e32 v1, 8, v0
        // to SDWA dst:v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD
        MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        auto Imm = foldToImm(*Src0);
        if (!Imm || *Imm != 8)
          break;

        MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

        if (TRI->isPhysicalRegister(Src1->getReg()) ||
            TRI->isPhysicalRegister(Dst->getReg()))
          break;

        if (Opcode == AMDGPU::V_LSHLREV_B16_e32 ||
            Opcode == AMDGPU::V_LSHLREV_B16_e64) {
          auto SDWADst =
            make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);
          DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWADst << '\n');
          SDWAOperands[&MI] = std::move(SDWADst);
          ++NumSDWAPatternsFound;
        } else {
          auto SDWASrc = make_unique<SDWASrcOperand>(
            Src1, Dst, BYTE_1, false, false,
            Opcode != AMDGPU::V_LSHRREV_B16_e32 &&
            Opcode != AMDGPU::V_LSHRREV_B16_e64);
          DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWASrc << '\n');
          SDWAOperands[&MI] = std::move(SDWASrc);
          ++NumSDWAPatternsFound;
        }
        break;
      }

      case AMDGPU::V_BFE_I32:
      case AMDGPU::V_BFE_U32: {
        // from: v_bfe_u32 v1, v0, 8, 8
        // to SDWA src:v0 src_sel:BYTE_1
        //
        // offset | width | src_sel
        // ------------------------
        // 0      | 8     | BYTE_0
        // 0      | 16    | WORD_0
        // 0      | 32    | DWORD
        // 8      | 8     | BYTE_1
        // 16     | 8     | BYTE_2
        // 16     | 16    | WORD_1
        // 24     | 8     | BYTE_3
        MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        auto Offset = foldToImm(*Src1);
        if (!Offset)
          break;
        MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        auto Width = foldToImm(*Src2);
        if (!Width)
          break;
        SdwaSel SrcSel = DWORD;
        if (*Offset == 0 && *Width == 8)
          SrcSel = BYTE_0;
        else if (*Offset == 0 && *Width == 16)
          SrcSel = WORD_0;
        else if (*Offset == 0 && *Width == 32)
          SrcSel = DWORD;
        else if (*Offset == 8 && *Width == 8)
          SrcSel = BYTE_1;
        else if (*Offset == 16 && *Width == 8)
          SrcSel = BYTE_2;
        else if (*Offset == 16 && *Width == 16)
          SrcSel = WORD_1;
        else if (*Offset == 24 && *Width == 8)
          SrcSel = BYTE_3;
        else
          break;

        MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
        if (TRI->isPhysicalRegister(Src0->getReg()) ||
            TRI->isPhysicalRegister(Dst->getReg()))
          break;

        auto SDWASrc = make_unique<SDWASrcOperand>(
          Src0, Dst, SrcSel, false, false,
          Opcode == AMDGPU::V_BFE_U32 ? false : true);
        DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWASrc << '\n');
        SDWAOperands[&MI] = std::move(SDWASrc);
        ++NumSDWAPatternsFound;
        break;
      }

      case AMDGPU::V_AND_B32_e32:
      case AMDGPU::V_AND_B32_e64: {
        // from: v_and_b32_e32 v1, 0x0000ffff/0x000000ff, v0
        // to SDWA src:v0 src_sel:WORD_0/BYTE_0
        MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        auto ValSrc = Src1;
        auto Imm = foldToImm(*Src0);
        if (!Imm) {
          Imm = foldToImm(*Src1);
          ValSrc = Src0;
        }

        if (!Imm || (*Imm != 0x0000ffff && *Imm != 0x000000ff))
          break;

        MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);

        if (TRI->isPhysicalRegister(Src1->getReg()) ||
            TRI->isPhysicalRegister(Dst->getReg()))
          break;

        auto SDWASrc = make_unique<SDWASrcOperand>(
          ValSrc, Dst, *Imm == 0x0000ffff ? WORD_0 : BYTE_0);
        DEBUG(dbgs() << "Match: " << MI << "To: " << *SDWASrc << '\n');
        SDWAOperands[&MI] = std::move(SDWASrc);
        ++NumSDWAPatternsFound;
        break;
      }
      }
    }
  }
}

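// Check whether MI can be rewritten in SDWA form on this subtarget: its opcode
// (or its shrunken VOP2 form) must have an _sdwa counterpart, and the operands
// and modifiers it uses must be representable in SDWA on ST.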
bool SIPeepholeSDWA::isConvertibleToSDWA(const MachineInstr &MI,
                                         const SISubtarget &ST) const {
  // Check if this instruction has an opcode that supports SDWA.
  int Opc = MI.getOpcode();
  if (AMDGPU::getSDWAOp(Opc) == -1)
    Opc = AMDGPU::getVOPe32(Opc);

  if (Opc == -1 || AMDGPU::getSDWAOp(Opc) == -1)
    return false;

  if (!ST.hasSDWAOmod() && TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->isVOPC(Opc)) {
    if (!ST.hasSDWASdst()) {
      const MachineOperand *SDst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst && SDst->getReg() != AMDGPU::VCC)
        return false;
    }

    if (!ST.hasSDWAOutModsVOPC() &&
        (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         TII->hasModifiersSet(MI, AMDGPU::OpName::omod)))
      return false;

  } else if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst) ||
             !TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) {
    return false;
  }

  if (!ST.hasSDWAMac() && (Opc == AMDGPU::V_MAC_F16_e32 ||
                           Opc == AMDGPU::V_MAC_F32_e32))
    return false;

  return true;
}

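// Build the SDWA form of MI, copy over its operands, then apply every matched
// SDWA operand to the new instruction. On success the original MI is erased;
// otherwise the half-built SDWA instruction is removed again.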
bool SIPeepholeSDWA::convertToSDWA(MachineInstr &MI,
                                   const SDWAOperandsVector &SDWAOperands) {
  int SDWAOpcode = AMDGPU::getSDWAOp(MI.getOpcode());
  if (SDWAOpcode == -1)
    SDWAOpcode = AMDGPU::getSDWAOp(AMDGPU::getVOPe32(MI.getOpcode()));
  assert(SDWAOpcode != -1);

  const MCInstrDesc &SDWADesc = TII->get(SDWAOpcode);

  // Create the SDWA version of instruction MI and initialize its operands.
  MachineInstrBuilder SDWAInst =
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), SDWADesc);

  // Copy dst: if it is present in the original then it should also be present
  // in the SDWA instruction.
  MachineOperand *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst);
  if (Dst) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::vdst) != -1);
    SDWAInst.add(*Dst);
  } else if ((Dst = TII->getNamedOperand(MI, AMDGPU::OpName::sdst))) {
    assert(Dst &&
           AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.add(*Dst);
  } else {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::sdst) != -1);
    SDWAInst.addReg(AMDGPU::VCC, RegState::Define);
  }

  // Copy src0 and initialize src0_modifiers. All SDWA instructions have src0
  // and src0_modifiers (except for v_nop_sdwa, but it can't get here).
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  assert(
    Src0 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0) != -1 &&
    AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_modifiers) != -1);
  if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers))
    SDWAInst.addImm(Mod->getImm());
  else
    SDWAInst.addImm(0);
  SDWAInst.add(*Src0);

  // Copy src1 if present, initialize src1_modifiers.
  MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1) {
    assert(
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1) != -1 &&
      AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_modifiers) != -1);
    if (auto *Mod = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers))
      SDWAInst.addImm(Mod->getImm());
    else
      SDWAInst.addImm(0);
    SDWAInst.add(*Src1);
  }

  if (SDWAOpcode == AMDGPU::V_MAC_F16_sdwa ||
      SDWAOpcode == AMDGPU::V_MAC_F32_sdwa) {
    // v_mac_f16/32 has an additional src2 operand tied to vdst.
    MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
    assert(Src2);
    SDWAInst.add(*Src2);
  }

  // Copy clamp if present, initialize otherwise.
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::clamp) != -1);
  MachineOperand *Clamp = TII->getNamedOperand(MI, AMDGPU::OpName::clamp);
  if (Clamp) {
    SDWAInst.add(*Clamp);
  } else {
    SDWAInst.addImm(0);
  }

  // Copy omod if present, initialize otherwise if needed.
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::omod) != -1) {
    MachineOperand *OMod = TII->getNamedOperand(MI, AMDGPU::OpName::omod);
    if (OMod) {
      SDWAInst.add(*OMod);
    } else {
      SDWAInst.addImm(0);
    }
  }

  // Initialize dst_sel if present.
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_sel) != -1) {
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Initialize dst_unused if present.
  if (AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::dst_unused) != -1) {
    SDWAInst.addImm(AMDGPU::SDWA::DstUnused::UNUSED_PAD);
  }

  // Initialize src0_sel.
  assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src0_sel) != -1);
  SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);

  // Initialize src1_sel if present.
  if (Src1) {
    assert(AMDGPU::getNamedOperandIdx(SDWAOpcode, AMDGPU::OpName::src1_sel) != -1);
    SDWAInst.addImm(AMDGPU::SDWA::SdwaSel::DWORD);
  }

  // Apply all SDWA operand patterns.
  bool Converted = false;
  for (auto &Operand : SDWAOperands) {
    // There should be no intersection between SDWA operands and potential MIs,
    // e.g.:
    // v_and_b32 v0, 0xff, v1 -> src:v1 sel:BYTE_0
    // v_and_b32 v2, 0xff, v0 -> src:v0 sel:BYTE_0
    // v_add_u32 v3, v4, v2
    //
    // In that example it is possible that we would fold the 2nd instruction
    // into the 3rd (v_add_u32_sdwa) and then try to fold the 1st instruction
    // into the 2nd (which was already destroyed). So if an SDWAOperand is also
    // a potential MI then do not apply it.
    if (PotentialMatches.count(Operand->getParentInst()) == 0)
      Converted |= Operand->convertToSDWA(*SDWAInst, TII);
  }
  if (Converted) {
    ConvertedInstructions.push_back(SDWAInst);
  } else {
    SDWAInst->eraseFromParent();
    return false;
  }

  DEBUG(dbgs() << "Convert instruction:" << MI
               << "Into:" << *SDWAInst << '\n');
  ++NumSDWAInstructionsPeepholed;

  MI.eraseFromParent();
  return true;
}

// If an instruction was converted to SDWA it should not have immediates or
// SGPR operands (one SGPR operand is allowed on GFX9). Copy its scalar
// operands into VGPRs.
void SIPeepholeSDWA::legalizeScalarOperands(MachineInstr &MI,
                                            const SISubtarget &ST) const {
  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  unsigned ConstantBusCount = 0;
  for (MachineOperand &Op : MI.explicit_uses()) {
    if (!Op.isImm() && !(Op.isReg() && !TRI->isVGPR(*MRI, Op.getReg())))
      continue;

    unsigned I = MI.getOperandNo(&Op);
    if (Desc.OpInfo[I].RegClass == -1 ||
        !TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass)))
      continue;

    if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() &&
        TRI->isSGPRReg(*MRI, Op.getReg())) {
      ++ConstantBusCount;
      continue;
    }

    unsigned VGPR = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    auto Copy = BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
                        TII->get(AMDGPU::V_MOV_B32_e32), VGPR);
    if (Op.isImm())
      Copy.addImm(Op.getImm());
    else if (Op.isReg())
      Copy.addReg(Op.getReg(), Op.isKill() ? RegState::Kill : 0,
                  Op.getSubReg());
    Op.ChangeToRegister(VGPR, false);
  }
}

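// Pass entry point: match SDWA operand patterns over the whole function, pair
// each matched operand with the instruction it could be folded into, convert
// those instructions to SDWA form, and finally legalize any scalar operands
// left on the converted instructions.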
bool SIPeepholeSDWA::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  if (!ST.hasSDWA())
    return false;

  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  // Find all SDWA operands in MF.
  matchSDWAOperands(MF);

  for (const auto &OperandPair : SDWAOperands) {
    const auto &Operand = OperandPair.second;
    MachineInstr *PotentialMI = Operand->potentialToConvert(TII);
    if (PotentialMI && isConvertibleToSDWA(*PotentialMI, ST)) {
      PotentialMatches[PotentialMI].push_back(Operand.get());
    }
  }

  for (auto &PotentialPair : PotentialMatches) {
    MachineInstr &PotentialMI = *PotentialPair.first;
    convertToSDWA(PotentialMI, PotentialPair.second);
  }

  PotentialMatches.clear();
  SDWAOperands.clear();

  bool Ret = !ConvertedInstructions.empty();
  while (!ConvertedInstructions.empty())
    legalizeScalarOperands(*ConvertedInstructions.pop_back_val(), ST);

  return Ret;
}