//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

using namespace llvm;

namespace {
class SIShrinkInstructions : public MachineFunctionPass {
  MachineFunction *MF;
  MachineRegisterInfo *MRI;
  const GCNSubtarget *ST;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;

public:
  static char ID;

public:
  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool foldImmediates(MachineInstr &MI, bool TryToCommute = true) const;
  bool shouldShrinkTrue16(MachineInstr &MI) const;
  bool isKImmOperand(const MachineOperand &Src) const;
  bool isKUImmOperand(const MachineOperand &Src) const;
  bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;
  bool isReverseInlineImm(const MachineOperand &Src, int32_t &ReverseImm) const;
  void copyExtraImplicitOps(MachineInstr &NewMI, MachineInstr &MI) const;
  void shrinkScalarCompare(MachineInstr &MI) const;
  void shrinkMIMG(MachineInstr &MI) const;
  void shrinkMadFma(MachineInstr &MI) const;
  bool shrinkScalarLogicOp(MachineInstr &MI) const;
  bool tryReplaceDeadSDST(MachineInstr &MI) const;
  bool instAccessReg(iterator_range<MachineInstr::const_mop_iterator> &&R,
                     Register Reg, unsigned SubReg) const;
  bool instReadsReg(const MachineInstr *MI, unsigned Reg,
                    unsigned SubReg) const;
  bool instModifiesReg(const MachineInstr *MI, unsigned Reg,
                       unsigned SubReg) const;
  TargetInstrInfo::RegSubRegPair getSubRegForIndex(Register Reg, unsigned Sub,
                                                   unsigned I) const;
  void dropInstructionKeepingImpDefs(MachineInstr &MI) const;
  MachineInstr *matchSwap(MachineInstr &MovT) const;

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Shrink Instructions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.
INITIALIZE_PASS(SIShrinkInstructions, DEBUG_TYPE,
                "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}
/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2 or VOPC instruction.
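/// For example (illustrative only, schematic MIR):
///   %1 = V_MOV_B32_e32 0x41800000        ; literal 16.0, not an inline constant
///   %2 = V_ADD_F32_e32 %1, %0
/// can be folded to
///   %2 = V_ADD_F32_e32 0x41800000, %0    ; the V_MOV is erased once %1 is dead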
bool SIShrinkInstructions::foldImmediates(MachineInstr &MI,
                                          bool TryToCommute) const {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Reg.isVirtual()) {
      MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
          if (MovSrc.isImm() &&
              (isInt<32>(MovSrc.getImm()) || isUInt<32>(MovSrc.getImm()))) {
            Src0.ChangeToImmediate(MovSrc.getImm());
            ConstantFolded = true;
          } else if (MovSrc.isFI()) {
            Src0.ChangeToFrameIndex(MovSrc.getIndex());
            ConstantFolded = true;
          } else if (MovSrc.isGlobal()) {
            Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                            MovSrc.getTargetFlags());
            ConstantFolded = true;
          }
        }

        if (ConstantFolded) {
          if (MRI->use_nodbg_empty(Reg))
            Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}
/// Do not shrink the instruction if its registers are not expressible in the
/// shrunk encoding.
bool SIShrinkInstructions::shouldShrinkTrue16(MachineInstr &MI) const {
  for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      assert(!Reg.isVirtual() && "Prior checks should ensure we only shrink "
                                 "True16 Instructions post-RA");
      if (AMDGPU::VGPR_32RegClass.contains(Reg) &&
          !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg))
        return false;
    }
  }
  return true;
}
bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
  return isInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(),
                                Src.getParent()->getOperandNo(&Src));
}

bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
                                                bool &IsUnsigned) const {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}
/// \returns true if the constant in \p Src should be replaced with a bitreverse
/// of an inline immediate.
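/// For example (illustrative only): 0x80000000 is not an inline immediate, but
/// reversing its bits yields 1, which is, so the value can be rematerialized
/// with a bit-reverse (V_BFREV_B32 / S_BREV_B32) of 1.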
bool SIShrinkInstructions::isReverseInlineImm(const MachineOperand &Src,
                                              int32_t &ReverseImm) const {
  if (!isInt<32>(Src.getImm()) || TII->isInlineConstant(Src))
    return false;

  ReverseImm = reverseBits<int32_t>(static_cast<int32_t>(Src.getImm()));
  return ReverseImm >= -16 && ReverseImm <= 64;
}
/// Copy implicit register operands from specified instruction to this
/// instruction that are not part of the instruction definition.
void SIShrinkInstructions::copyExtraImplicitOps(MachineInstr &NewMI,
                                                MachineInstr &MI) const {
  MachineFunction &MF = *MI.getMF();
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}
void SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction to
  // get constants on the RHS.
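  // For example (illustrative only):
  //   s_cmp_lg_u32 s0, 0xabcd      ; needs a 32-bit literal
  // can be encoded as
  //   s_cmpk_lg_u32 s0, 0xabcd     ; imm16 carried in the instruction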
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  // cmpk requires src0 to be a register
  const MachineOperand &Src0 = MI.getOperand(0);
  if (!Src0.isReg())
    return;

  const MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
    }

    return;
  }

  const MCInstrDesc &NewDesc = TII->get(SOPKOpc);

  if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(Src1)) ||
      (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(Src1))) {
    MI.setDesc(NewDesc);
  }
}
// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.
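// For example (illustrative only), an NSA form whose address registers happen
// to be allocated contiguously, such as
//   image_sample v[0:3], [v4, v5, v6], s[0:7], s[8:11]
// can be rewritten to the sequential-address form
//   image_sample v[0:3], v[4:6], s[0:7], s[8:11]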
void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) const {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (!Info)
    return;

  unsigned NewEncoding;
  switch (Info->MIMGEncoding) {
  case AMDGPU::MIMGEncGfx10NSA:
    NewEncoding = AMDGPU::MIMGEncGfx10Default;
    break;
  case AMDGPU::MIMGEncGfx11NSA:
    NewEncoding = AMDGPU::MIMGEncGfx11Default;
    break;
  default:
    return;
  }

  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords == 5) {
    RC = &AMDGPU::VReg_160RegClass;
  } else if (Info->VAddrDwords == 6) {
    RC = &AMDGPU::VReg_192RegClass;
  } else if (Info->VAddrDwords == 7) {
    RC = &AMDGPU::VReg_224RegClass;
  } else if (Info->VAddrDwords == 8) {
    RC = &AMDGPU::VReg_256RegClass;
  } else if (Info->VAddrDwords == 9) {
    RC = &AMDGPU::VReg_288RegClass;
  } else if (Info->VAddrDwords == 10) {
    RC = &AMDGPU::VReg_320RegClass;
  } else if (Info->VAddrDwords == 11) {
    RC = &AMDGPU::VReg_352RegClass;
  } else if (Info->VAddrDwords == 12) {
    RC = &AMDGPU::VReg_384RegClass;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }
  unsigned VgprBase = 0;
  unsigned NextVgpr = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  for (unsigned Idx = 0; Idx < Info->VAddrOperands; ++Idx) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + Idx);
    unsigned Vgpr = TRI->getHWRegIndex(Op.getReg());
    unsigned Dwords = TRI->getRegSizeInBits(Op.getReg(), *MRI) / 32;
    assert(Dwords > 0 && "Un-implemented for less than 32 bit regs");

    if (Idx == 0) {
      VgprBase = Vgpr;
      NextVgpr = Vgpr + Dwords;
    } else if (Vgpr == NextVgpr) {
      NextVgpr = Vgpr + Dwords;
    } else {
      return;
    }

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  // Further check for implicit tied operands - this may be present if TFE is
  // enabled
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled so we need to deal with an implicit tied operand
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // This is the tied operand
        assert(
            ToUntie == -1 &&
            "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode = AMDGPU::getMIMGOpcode(Info->BaseOpcode, NewEncoding,
                                             Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (int i = 1; i < Info->VAddrOperands; ++i)
    MI.removeOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (Info->VAddrOperands - 1));
  }
}
// Shrink MAD to MADAK/MADMK and FMA to FMAAK/FMAMK.
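// For example (illustrative only):
//   v_fma_f32 v0, v1, v2, 0x42c80000   ; non-inline literal in src2
// becomes the AK form
//   v_fmaak_f32 v0, v1, v2, 0x42c80000
// while a literal in src0/src1 with a VGPR in src2 becomes the MK form
//   v_fmamk_f32 v0, v1, 0x42c80000, v2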
void SIShrinkInstructions::shrinkMadFma(MachineInstr &MI) const {
  // Pre-GFX10 VOP3 instructions like MAD/FMA cannot take a literal operand so
  // there is no reason to try to shrink them.
  if (!ST->hasVOP3Literal())
    return;

  // There is no advantage to doing this pre-RA.
  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs))
    return;

  if (TII->hasAnyModifiersSet(MI))
    return;

  const unsigned Opcode = MI.getOpcode();
  MachineOperand &Src0 = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand &Src1 = *TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  MachineOperand &Src2 = *TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  unsigned NewOpcode = AMDGPU::INSTRUCTION_LIST_END;

  bool Swap;

  // Detect "Dst = VSrc * VGPR + Imm" and convert to AK form.
  if (Src2.isImm() && !TII->isInlineConstant(Src2)) {
    if (Src1.isReg() && TRI->isVGPR(*MRI, Src1.getReg()))
      Swap = false;
    else if (Src0.isReg() && TRI->isVGPR(*MRI, Src0.getReg()))
      Swap = true;
    else
      return;

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected mad/fma opcode!");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADAK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAAK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADAK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAAK_F16_t16
                                          : AMDGPU::V_FMAAK_F16;
      break;
    }
  }
  // Detect "Dst = VSrc * Imm + VGPR" and convert to MK form.
  if (Src2.isReg() && TRI->isVGPR(*MRI, Src2.getReg())) {
    if (Src1.isImm() && !TII->isInlineConstant(Src1))
      Swap = false;
    else if (Src0.isImm() && !TII->isInlineConstant(Src0))
      Swap = true;
    else
      return;

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected mad/fma opcode!");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADMK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAMK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADMK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAMK_F16_t16
                                          : AMDGPU::V_FMAMK_F16;
      break;
    }
  }

  if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
    return;

  if (AMDGPU::isTrue16Inst(NewOpcode) && !shouldShrinkTrue16(MI))
    return;

  if (Swap) {
    // Swap Src0 and Src1 by building a new instruction.
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(NewOpcode),
            MI.getOperand(0).getReg())
        .add(Src1)
        .add(Src0)
        .add(Src2)
        .setMIFlags(MI.getFlags());
    MI.eraseFromParent();
  } else {
    TII->removeModOperands(MI);
    MI.setDesc(TII->get(NewOpcode));
  }
}
/// Attempt to shrink AND/OR/XOR operations requiring non-inlineable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits.
/// If the inverse of the immediate is legal, use ANDN2, ORN2 or
/// XNOR (as a ^ b == ~(a ^ ~b)).
/// \returns true if the caller should continue the machine function iterator
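/// For example (illustrative only, assuming the source and destination have
/// been allocated to the same register):
///   s_and_b32 s0, s0, 0xffffdfff  ->  s_bitset0_b32 s0, 13
///   s_or_b32  s0, s0, 0x00040000  ->  s_bitset1_b32 s0, 18
///   s_xor_b32 s0, s0, 0xffffffc0  ->  s_xnor_b32 s0, s0, 63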
bool SIShrinkInstructions::shrinkScalarLogicOp(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *Src0 = &MI.getOperand(1);
  MachineOperand *Src1 = &MI.getOperand(2);
  MachineOperand *SrcReg = Src0;
  MachineOperand *SrcImm = Src1;

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST->hasInv2PiInlineImm()))
    return false;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm)) {
      NewImm = countTrailingOnes(Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm)) {
      NewImm = countTrailingZeros(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  } else {
    llvm_unreachable("unexpected opcode");
  }
  if (NewImm != 0) {
    if (Dest->getReg().isVirtual() && SrcReg->isReg()) {
      MRI->setRegAllocationHint(Dest->getReg(), 0, SrcReg->getReg());
      MRI->setRegAllocationHint(SrcReg->getReg(), 0, Dest->getReg());
      return true;
    }

    if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
      const bool IsUndef = SrcReg->isUndef();
      const bool IsKill = SrcReg->isKill();
      MI.setDesc(TII->get(Opc));
      if (Opc == AMDGPU::S_BITSET0_B32 ||
          Opc == AMDGPU::S_BITSET1_B32) {
        Src0->ChangeToImmediate(NewImm);
        // Remove the immediate and add the tied input.
        MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
                                          /*isImp*/ false, IsKill,
                                          /*isDead*/ false, IsUndef);
        MI.tieOperands(0, 2);
      } else {
        SrcImm->setImm(NewImm);
      }
    }
  }

  return false;
}
// This is the same as MachineInstr::readsRegister/modifiesRegister except
// it takes subregs into account.
bool SIShrinkInstructions::instAccessReg(
    iterator_range<MachineInstr::const_mop_iterator> &&R, Register Reg,
    unsigned SubReg) const {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Reg.isPhysical() && MO.getReg().isPhysical()) {
      if (TRI->regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Reg.isVirtual()) {
      LaneBitmask Overlap = TRI->getSubRegIndexLaneMask(SubReg) &
                            TRI->getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

bool SIShrinkInstructions::instReadsReg(const MachineInstr *MI, unsigned Reg,
                                        unsigned SubReg) const {
  return instAccessReg(MI->uses(), Reg, SubReg);
}

bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI, unsigned Reg,
                                           unsigned SubReg) const {
  return instAccessReg(MI->defs(), Reg, SubReg);
}
TargetInstrInfo::RegSubRegPair
SIShrinkInstructions::getSubRegForIndex(Register Reg, unsigned Sub,
                                        unsigned I) const {
  if (TRI->getRegSizeInBits(Reg, *MRI) != 32) {
    if (Reg.isPhysical()) {
      Reg = TRI->getSubReg(Reg, TRI->getSubRegFromChannel(I));
    } else {
      Sub = TRI->getSubRegFromChannel(I + TRI->getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}
void SIShrinkInstructions::dropInstructionKeepingImpDefs(
    MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &Op = MI.getOperand(i);
    if (!Op.isDef())
      continue;
    BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
  }

  MI.eraseFromParent();
}
// Match:
//   mov t, x
//   mov x, y
//   mov y, t
// =>
//   mov t, x (t is potentially dead and move eliminated)
//   v_swap_b32 x, y
//
// Returns next valid instruction pointer if was able to create v_swap_b32.
//
// This should not be done too early, so that possible folding which may
// remove the matched moves is not prevented. It should preferably be done
// before RA to release saved registers, and possibly again after RA, which
// can insert copies too.
//
// This is really just a generic peephole that is not a canonical shrinking,
// although requirements match the pass placement and it reduces code size too.
MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0) / 4;

  if (!TRI->isVGPR(*MRI, X))
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  bool KilledT = false;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {

    MachineInstr *MovY = &*Iter;
    KilledT = MovY->killsRegister(T, TRI);

    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY->getOpcode() != AMDGPU::COPY) ||
        !MovY->getOperand(1).isReg() ||
        MovY->getOperand(1).getReg() != T ||
        MovY->getOperand(1).getSubReg() != Tsub)
      continue;

    Register Y = MovY->getOperand(0).getReg();
    unsigned Ysub = MovY->getOperand(0).getSubReg();

    if (!TRI->isVGPR(*MRI, Y))
      continue;

    MachineInstr *MovX = nullptr;
    for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
         I != IY; ++I) {
      if (instReadsReg(&*I, X, Xsub) || instModifiesReg(&*I, Y, Ysub) ||
          instModifiesReg(&*I, T, Tsub) ||
          (MovX && instModifiesReg(&*I, X, Xsub))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }

      if (Size > 1 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
        continue;

      MovX = &*I;
    }

    if (!MovX)
      continue;
    LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << *MovY);

    for (unsigned I = 0; I < Size; ++I) {
      TargetInstrInfo::RegSubRegPair X1, Y1;
      X1 = getSubRegForIndex(X, Xsub, I);
      Y1 = getSubRegForIndex(Y, Ysub, I);
      MachineBasicBlock &MBB = *MovT.getParent();
      auto MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
                         TII->get(AMDGPU::V_SWAP_B32))
                     .addDef(X1.Reg, 0, X1.SubReg)
                     .addDef(Y1.Reg, 0, Y1.SubReg)
                     .addReg(Y1.Reg, 0, Y1.SubReg)
                     .addReg(X1.Reg, 0, X1.SubReg).getInstr();
      if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
        // Drop implicit EXEC.
        MIB->removeOperand(MIB->getNumExplicitOperands());
        MIB->copyImplicitOps(*MBB.getParent(), *MovX);
      }
    }
    MovX->eraseFromParent();
    dropInstructionKeepingImpDefs(*MovY);
    MachineInstr *Next = &*std::next(MovT.getIterator());

    if (T.isVirtual() && MRI->use_nodbg_empty(T)) {
      dropInstructionKeepingImpDefs(MovT);
    } else {
      Xop.setIsKill(false);
      for (int I = MovT.getNumImplicitOperands() - 1; I >= 0; --I ) {
        unsigned OpNo = MovT.getNumExplicitOperands() + I;
        const MachineOperand &Op = MovT.getOperand(OpNo);
        if (Op.isKill() && TRI->regsOverlap(X, Op.getReg()))
          MovT.removeOperand(OpNo);
      }
    }

    return Next;
  }

  return nullptr;
}
// If an instruction has dead sdst replace it with NULL register on gfx1030+
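// For example (illustrative only), if the carry-out of
//   v_add_co_u32 v0, s[4:5], v1, v2
// is never read, the SGPR pair can be replaced with the null register:
//   v_add_co_u32 v0, null, v1, v2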
bool SIShrinkInstructions::tryReplaceDeadSDST(MachineInstr &MI) const {
  if (!ST->hasGFX10_3Insts())
    return false;

  MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Op)
    return false;

  Register SDstReg = Op->getReg();
  if (SDstReg.isPhysical() || !MRI->use_nodbg_empty(SDstReg))
    return false;

  Op->setReg(ST->isWave32() ? AMDGPU::SGPR_NULL : AMDGPU::SGPR_NULL64);
  return true;
}
bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  this->MF = &MF;
  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  unsigned VCCReg = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits of an inline immediate, replace with a bitreverse of
        // that constant. This saves 4 bytes in the common case of materializing
        // sign bits.

        // Test if we are after regalloc. We only want to do this after any
        // optimizations happen because this will confuse them.
        // XXX - not exactly a check for post-regalloc run.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
          int32_t ReverseImm;
          if (isReverseInlineImm(Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::V_BFREV_B32_e32));
            Src.setImm(ReverseImm);
            continue;
          }
        }
      }

      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                            MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI)) {
          Next = NextMI->getIterator();
          continue;
        }
      }
      // Try to use S_ADDK_I32 and S_MULK_I32.
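      // For example (illustrative only), "s_add_i32 s0, s0, 0x1234" can become
      // "s_addk_i32 s0, 0x1234" once the destination and first source share a
      // register.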
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters. If
        // we have a vector add of a constant, we usually don't get the correct
        // allocation due to the subregister usage.
        if (Dest->getReg().isVirtual() && Src0->isReg()) {
          MRI->setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          MRI->setRegAllocationHint(Src0->getReg(), 0, Dest->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(*Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(MI);
        continue;
      }
      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
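      // For example (illustrative only), "s_mov_b32 s0, 0x1234" becomes
      // "s_movk_i32 s0, 0x1234", and "s_mov_b32 s0, 0x80000000" becomes
      // "s_brev_b32 s0, 1".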
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Dst.getReg().isPhysical()) {
          int32_t ReverseImm;
          if (isKImmOperand(Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          else if (isReverseInlineImm(Src, ReverseImm)) {
            MI.setDesc(TII->get(AMDGPU::S_BREV_B32));
            Src.setImm(ReverseImm);
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          ST->getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->isVOP3(MI))
        continue;

      if (MI.getOpcode() == AMDGPU::V_MAD_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_MAD_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_e64) {
        shrinkMadFma(MI);
        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode())) {
        // If there is no chance we will shrink it and use VCC as sdst to get
        // a 32 bit form try to replace dead sdst with NULL.
        tryReplaceDeadSDST(MI);
        continue;
      }
      if (!TII->canShrink(MI, *MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, *MRI)) {
          tryReplaceDeadSDST(MI);
          continue;
        }
      }

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        MachineOperand &Op0 = MI.getOperand(0);
        if (Op0.isReg()) {
          // Exclude VOPCX instructions as these don't explicitly write a
          // dst.
          Register DstReg = Op0.getReg();
          if (DstReg.isVirtual()) {
            // VOPC instructions can only write to the VCC register. We can't
            // force them to use VCC here, because this is only one register and
            // cannot deal with sequences which would require multiple copies of
            // VCC, e.g. S_AND_B64 (vcc = V_CMP_...), (vcc = V_CMP_...)
            //
            // So, instead of forcing the instruction to write to VCC, we
            // provide a hint to the register allocator to use VCC and then we
            // will run this pass again after RA and shrink it if it outputs to
            // VCC.
            MRI->setRegAllocationHint(DstReg, 0, VCCReg);
            continue;
          }
          if (DstReg != VCCReg)
            continue;
        }
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // We shrink V_CNDMASK_B32_e64 using regalloc hints like we do for VOPC
        // instructions.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (SReg.isVirtual()) {
          MRI->setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }
      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (SDst->getReg().isVirtual())
            MRI->setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        const MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                          AMDGPU::OpName::src2);
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Src2->getReg().isVirtual())
            MRI->setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // Pre-GFX10, shrinking VOP3 instructions pre-RA gave us the chance to
      // fold an immediate into the shrunk instruction as a literal operand. In
      // GFX10 VOP3 instructions can take a literal operand anyway, so there is
      // no advantage to doing this.
      if (ST->hasVOP3Literal() &&
          !MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs))
        continue;

      if (ST->hasTrue16BitInsts() && AMDGPU::isTrue16Inst(MI.getOpcode()) &&
          !shouldShrinkTrue16(MI))
        continue;

      // We can shrink this instruction
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MI);

      // Copy deadness from the old explicit vcc def to the new implicit def.
      if (SDst && SDst->isDead())
        Inst32->findRegisterDefOperand(VCCReg)->setIsDead();

      MI.eraseFromParent();
      foldImmediates(*Inst32);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}