//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass performs exec mask handling peephole optimizations which need
/// to be done before register allocation to reduce register pressure.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"

namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  MachineRegisterInfo *MRI;

public:
  static char ID;

  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

static bool isFullExecCopy(const MachineInstr& MI, const GCNSubtarget& ST) {
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;

  if (MI.isFullCopy() && MI.getOperand(1).getReg() == Exec)
    return true;

  return false;
}

// Optimize sequence
//    %sel = V_CNDMASK_B32_e64 0, 1, %cc
//    %cmp = V_CMP_NE_U32 1, %sel
//    $vcc = S_AND_B64 $exec, %cmp
//    S_CBRANCH_VCC[N]Z
// =>
//    $vcc = S_ANDN2_B64 $exec, %cc
//    S_CBRANCH_VCC[N]Z
//
// It is the negation pattern inserted by DAGCombiner::visitBRCOND() in the
// rebuildSetCC(). We start with S_CBRANCH to avoid an exhaustive search, but
// only the first 3 instructions are really needed. S_AND_B64 with exec is a
// required part of the pattern since V_CNDMASK_B32 writes zeroes for inactive
// lanes.
//
// Returns %cc register on success.
static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
static unsigned optimizeVcndVcmpPair(MachineBasicBlock &MBB,
                                     const GCNSubtarget &ST,
                                     MachineRegisterInfo &MRI,
                                     LiveIntervals *LIS) {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  bool Wave32 = ST.isWave32();
  const unsigned AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const unsigned Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
  const unsigned CondReg = Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
  const unsigned ExecReg = Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
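
  // Start from the conditional VCC branch among the block terminators; the
  // rest of the pattern is located by walking reaching definitions backwards.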
  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
                           unsigned Opc = MI.getOpcode();
                           return Opc == AMDGPU::S_CBRANCH_VCCZ ||
                                  Opc == AMDGPU::S_CBRANCH_VCCNZ; });
  if (I == MBB.terminators().end())
    return AMDGPU::NoRegister;
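
  // The branch reads VCC, so its reaching definition must be the S_AND of
  // EXEC with the compare result.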
  auto *And = TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister,
                                   *I, MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return AMDGPU::NoRegister;
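
  // Either source of the S_AND may be EXEC; whichever one is not EXEC is
  // taken as the compare result.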
  MachineOperand *AndCC = &And->getOperand(1);
  Register CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == ExecReg) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != ExecReg) {
    return AMDGPU::NoRegister;
  }

  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return AMDGPU::NoRegister;
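
  // The compare must test the select result against the immediate 1; accept
  // the operands in either order.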
  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return AMDGPU::NoRegister;

  Register SelReg = Op1->getReg();
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return AMDGPU::NoRegister;
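
  // Source modifiers would change the selected values, so the fold is only
  // valid for a plain V_CNDMASK_B32 0, 1, %cc.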
  if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
      TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
    return AMDGPU::NoRegister;

  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return AMDGPU::NoRegister;
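
  // The whole pattern matched: replace the S_AND of the compare result with
  // an S_ANDN2 of EXEC and the original condition %cc.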
  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t'
                    << *And);

  Register CCReg = CC->getReg();
  LIS->RemoveMachineInstrFromMaps(*And);
  MachineInstr *Andn2 =
      BuildMI(MBB, *And, And->getDebugLoc(), TII->get(Andn2Opc),
              And->getOperand(0).getReg())
          .addReg(ExecReg)
          .addReg(CCReg, getUndefRegState(CC->isUndef()), CC->getSubReg());
  MachineOperand &AndSCC = And->getOperand(3);
  assert(AndSCC.getReg() == AMDGPU::SCC);
  MachineOperand &Andn2SCC = Andn2->getOperand(3);
  assert(Andn2SCC.getReg() == AMDGPU::SCC);
  Andn2SCC.setIsDead(AndSCC.isDead());
  And->eraseFromParent();
  LIS->InsertMachineInstrInMaps(*Andn2);

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Try to remove the compare. If the result is VCC, it must not be read
  // between the compare and the S_AND; for any other register it merely has
  // to be otherwise unused.
  if ((Register::isVirtualRegister(CmpReg) && MRI.use_nodbg_empty(CmpReg)) ||
      (CmpReg == CondReg &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI);
                    }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');

    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove v_cndmask_b32.
    if (Register::isVirtualRegister(SelReg) && MRI.use_nodbg_empty(SelReg)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return CCReg;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MRI = &MF.getRegInfo();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
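
  // Live ranges of these registers are recomputed at the end if anything
  // changed.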
  DenseSet<unsigned> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

    if (unsigned Reg = optimizeVcndVcmpPair(MBB, ST, MRI, LIS)) {
      RecalcRegs.insert(Reg);
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses, otherwise we would need
      // to be careful to update / remove them.
      // S_ENDPGM always has a single imm operand that is not used other than
      // to end up in the encoding.
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
        continue;

      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();

        // Step over a trailing unconditional branch or s_endpgm; any other
        // branch terminator blocks the cleanup.
        if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
          ++I;
        else if (I->isBranch())
          continue;

        while (I != E) {
          if (I->isDebugInstr()) {
            I = std::next(I);
            continue;
          }

          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no effect instruction: " << *I << '\n');

          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        if (I != E)
          continue;

        // Try to ascend predecessors.
        for (auto *Pred : CurBB->predecessors()) {
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }

      continue;
    }

    // If the only user of a logical operation is a move to exec, fold it now
    // to prevent forming of saveexec. I.e.:
    //
    //    %0:sreg_64 = COPY $exec
    //    %1:sreg_64 = S_AND_B64 %0:sreg_64, %2:sreg_64
    // =>
    //    %1 = S_AND_B64 $exec, %2:sreg_64
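    //
    // Only the last few instructions of the block are scanned (bounded by
    // ScanThreshold below) to keep the backward search cheap.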
    unsigned ScanThreshold = 10;
    for (auto I = MBB.rbegin(), E = MBB.rend(); I != E
         && ScanThreshold--; ++I) {
      if (!isFullExecCopy(*I, ST))
        continue;

      Register SavedExec = I->getOperand(0).getReg();
      if (SavedExec.isVirtual() && MRI.hasOneNonDBGUse(SavedExec) &&
          MRI.use_instr_nodbg_begin(SavedExec)->getParent() == I->getParent()) {
        LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *I << '\n');
        LIS->RemoveMachineInstrFromMaps(*I);
        I->eraseFromParent();
        MRI.replaceRegWith(SavedExec, Exec);
        LIS->removeInterval(SavedExec);
        Changed = true;
      }
      break;
    }
  }

  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (Register::isVirtualRegister(Reg)) {
        LIS->removeInterval(Reg);
        if (!MRI.reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }

  return Changed;
}