//===-- SIInsertSkips.cpp - Use predicates for control flow ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass inserts branches on the 0 exec mask over divergent
/// branches when it's expected that jumping over the untaken control flow
/// will be cheaper than having every workitem no-op through it.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "si-insert-skips"

static cl::opt<unsigned> SkipThresholdFlag(
  "amdgpu-skip-threshold",
  cl::desc("Number of instructions before jumping over divergent control flow"),
  cl::init(12), cl::Hidden);

namespace {

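// Lowers SI_MASK_BRANCH and SI_KILL_*_TERMINATOR pseudos, inserting
// s_cbranch_execz skips when the skipped region is long enough to pay for
// the branch.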
class SIInsertSkips : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI = nullptr;
  const SIInstrInfo *TII = nullptr;
  unsigned SkipThreshold = 0;

  bool shouldSkip(const MachineBasicBlock &From,
                  const MachineBasicBlock &To) const;

  bool skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB);

  void kill(MachineInstr &MI);

  MachineBasicBlock *insertSkipBlock(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;

  bool skipMaskBranch(MachineInstr &MI, MachineBasicBlock &MBB);

public:
  static char ID;

  SIInsertSkips() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI insert s_cbranch_execz instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char SIInsertSkips::ID = 0;

INITIALIZE_PASS(SIInsertSkips, DEBUG_TYPE,
                "SI insert s_cbranch_execz instructions", false, false)

char &llvm::SIInsertSkipsPassID = SIInsertSkips::ID;

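// Returns true for meta-opcodes that emit no machine code, so they don't
// count toward the skip threshold.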
static bool opcodeEmitsNoInsts(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::BUNDLE:
  case TargetOpcode::CFI_INSTRUCTION:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::GC_LABEL:
  case TargetOpcode::DBG_VALUE:
    return true;
  default:
    return false;
  }
}

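// Estimates whether a branch from \p From over the blocks up to (but not
// including) \p To is worthwhile, either because enough instructions would
// be skipped to pay for the jump or because skipping is required for
// correctness.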
bool SIInsertSkips::shouldSkip(const MachineBasicBlock &From,
                               const MachineBasicBlock &To) const {
  if (From.succ_empty())
    return false;

  unsigned NumInstr = 0;
  const MachineFunction *MF = From.getParent();

  for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::const_iterator I = MBB.begin(), E = MBB.end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (opcodeEmitsNoInsts(I->getOpcode()))
        continue;

      // FIXME: Since this is required for correctness, this should be inserted
      // during SILowerControlFlow.

      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
      // when EXEC = 0. We should skip the loop lest it becomes infinite.
      if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ ||
          I->getOpcode() == AMDGPU::S_CBRANCH_VCCZ)
        return true;

      // The destination register of V_READFIRSTLANE/V_READLANE may be used
      // as an operand by some SALU instruction. If the exec mask is zero,
      // the vector instruction defining that register is not executed, and
      // the scalar instruction would operate on undefined data. Therefore we
      // should avoid predicated execution for V_READFIRSTLANE/V_READLANE.
      if (I->getOpcode() == AMDGPU::V_READFIRSTLANE_B32 ||
          I->getOpcode() == AMDGPU::V_READLANE_B32)
        return true;

      if (I->isInlineAsm()) {
        const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
        const char *AsmStr = I->getOperand(0).getSymbolName();

        // The inline asm length estimate is the number of bytes, assuming
        // the longest instruction encoding.
        uint64_t MaxAsmSize = TII->getInlineAsmLength(AsmStr, *MAI);
        NumInstr += MaxAsmSize / MAI->getMaxInstLength();
      } else {
        ++NumInstr;
      }

      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

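// For pixel shaders, when the skip is worthwhile, insert a block after \p MI
// that exports to the null target and terminates the wavefront; live waves
// (exec != 0) branch over it to \p NextBB. Returns true if the exit block
// was inserted.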
bool SIInsertSkips::skipIfDead(MachineInstr &MI, MachineBasicBlock &NextBB) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction *MF = MBB.getParent();

  if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
      !shouldSkip(MBB, MBB.getParent()->back()))
    return false;

  MachineBasicBlock *SkipBB = insertSkipBlock(MBB, MI.getIterator());

  const DebugLoc &DL = MI.getDebugLoc();

  // If the exec mask is non-zero, skip the next two instructions.
  BuildMI(&MBB, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&NextBB);

  MachineBasicBlock::iterator Insert = SkipBB->begin();

  // Exec mask is zero: Export to NULL target...
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::EXP_DONE))
    .addImm(0x09)  // V_008DFC_SQ_EXP_NULL
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addReg(AMDGPU::VGPR0, RegState::Undef)
    .addImm(1)  // vm
    .addImm(0)  // compr
    .addImm(0); // en

  // ... and terminate wavefront.
  BuildMI(*SkipBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));

  return true;
}

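// Expands a SI_KILL_*_TERMINATOR pseudo into the V_CMPX compare or exec-mask
// update that disables the lanes failing the kill condition.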
void SIInsertSkips::kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: {
    unsigned Opcode = 0;

    // The opcodes are inverted because the inline immediate has to be
    // the first operand, e.g. from "x < imm" to "imm > x".
    switch (MI.getOperand(2).getImm()) {
    case ISD::SETOEQ:
    case ISD::SETEQ:
      Opcode = AMDGPU::V_CMPX_EQ_F32_e64;
      break;
    case ISD::SETOGT:
    case ISD::SETGT:
      Opcode = AMDGPU::V_CMPX_LT_F32_e64;
      break;
    case ISD::SETOGE:
    case ISD::SETGE:
      Opcode = AMDGPU::V_CMPX_LE_F32_e64;
      break;
    case ISD::SETOLT:
    case ISD::SETLT:
      Opcode = AMDGPU::V_CMPX_GT_F32_e64;
      break;
    case ISD::SETOLE:
    case ISD::SETLE:
      Opcode = AMDGPU::V_CMPX_GE_F32_e64;
      break;
    case ISD::SETONE:
    case ISD::SETNE:
      Opcode = AMDGPU::V_CMPX_LG_F32_e64;
      break;
    case ISD::SETO:
      Opcode = AMDGPU::V_CMPX_O_F32_e64;
      break;
    case ISD::SETUO:
      Opcode = AMDGPU::V_CMPX_U_F32_e64;
      break;
    case ISD::SETUEQ:
      Opcode = AMDGPU::V_CMPX_NLG_F32_e64;
      break;
    case ISD::SETUGT:
      Opcode = AMDGPU::V_CMPX_NGE_F32_e64;
      break;
    case ISD::SETUGE:
      Opcode = AMDGPU::V_CMPX_NGT_F32_e64;
      break;
    case ISD::SETULT:
      Opcode = AMDGPU::V_CMPX_NLE_F32_e64;
      break;
    case ISD::SETULE:
      Opcode = AMDGPU::V_CMPX_NLT_F32_e64;
      break;
    case ISD::SETUNE:
      Opcode = AMDGPU::V_CMPX_NEQ_F32_e64;
      break;
    default:
      llvm_unreachable("invalid ISD:SET cond code");
    }

    assert(MI.getOperand(0).isReg());

    if (TRI->isVGPR(MBB.getParent()->getRegInfo(),
                    MI.getOperand(0).getReg())) {
      Opcode = AMDGPU::getVOPe32(Opcode);
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .add(MI.getOperand(1))
          .add(MI.getOperand(0));
    } else {
      BuildMI(MBB, &MI, DL, TII->get(Opcode))
          .addReg(AMDGPU::VCC, RegState::Define)
          .addImm(0)  // src0 modifiers
          .add(MI.getOperand(1))
          .addImm(0)  // src1 modifiers
          .add(MI.getOperand(0))
          .addImm(0); // omod
    }
    break;
  }
  case AMDGPU::SI_KILL_I1_TERMINATOR: {
    const MachineOperand &Op = MI.getOperand(0);
    int64_t KillVal = MI.getOperand(1).getImm();
    assert(KillVal == 0 || KillVal == -1);

    // Kill all threads if Op0 is an immediate and equal to the Kill value.
    if (Op.isImm()) {
      int64_t Imm = Op.getImm();
      assert(Imm == 0 || Imm == -1);

      if (Imm == KillVal)
        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
          .addImm(0);
      break;
    }

    unsigned Opcode = KillVal ? AMDGPU::S_ANDN2_B64 : AMDGPU::S_AND_B64;
    BuildMI(MBB, &MI, DL, TII->get(Opcode), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC)
        .add(Op);
    break;
  }
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_TERMINATOR");
  }
}

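// Creates an empty block immediately after \p MBB and records it as a
// successor of \p MBB. (The iterator parameter is currently unused.)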
MachineBasicBlock *SIInsertSkips::insertSkipBlock(
  MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const {
  MachineFunction *MF = MBB.getParent();

  MachineBasicBlock *SkipBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, SkipBB);
  MBB.addSuccessor(SkipBB);

  return SkipBB;
}

// Returns true if a branch over the block was inserted.
bool SIInsertSkips::skipMaskBranch(MachineInstr &MI,
                                   MachineBasicBlock &SrcMBB) {
  MachineBasicBlock *DestBB = MI.getOperand(0).getMBB();

  if (!shouldSkip(**SrcMBB.succ_begin(), *DestBB))
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator InsPt = std::next(MI.getIterator());

  BuildMI(SrcMBB, InsPt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addMBB(DestBB);

  return true;
}

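// Walks the blocks in layout order, lowering SI_MASK_BRANCH into
// s_cbranch_execz skips, expanding SI_KILL_*_TERMINATOR pseudos, and
// cleaning up redundant unconditional branches.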
bool SIInsertSkips::runOnMachineFunction(MachineFunction &MF) {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  SkipThreshold = SkipThresholdFlag;

  bool HaveKill = false;
  bool MadeChange = false;

  // Track depth of exec mask, divergent branches.
  SmallVector<MachineBasicBlock *, 16> ExecBranchStack;

  MachineFunction::iterator NextBB;

  MachineBasicBlock *EmptyMBBAtEnd = nullptr;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; BI = NextBB) {
    NextBB = std::next(BI);
    MachineBasicBlock &MBB = *BI;
    bool HaveSkipBlock = false;

    if (!ExecBranchStack.empty() && ExecBranchStack.back() == &MBB) {
      // Reached convergence point for last divergent branch.
      ExecBranchStack.pop_back();
    }

    if (HaveKill && ExecBranchStack.empty()) {
      HaveKill = false;

      // TODO: Insert skip if exec is 0?
    }

    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      case AMDGPU::SI_MASK_BRANCH:
        ExecBranchStack.push_back(MI.getOperand(0).getMBB());
        MadeChange |= skipMaskBranch(MI, MBB);
        break;

      case AMDGPU::S_BRANCH:
        // Optimize out branches to the next block.
        // FIXME: Shouldn't this be handled by BranchFolding?
        if (MBB.isLayoutSuccessor(MI.getOperand(0).getMBB())) {
          MI.eraseFromParent();
        } else if (HaveSkipBlock) {
          // Remove the unconditional branch when a skip block has been
          // inserted after the current block; the S_CBRANCH_EXECNZ that
          // guards the kill already jumps past the two exit instructions
          // when the exec mask is non-zero.
          MI.eraseFromParent();
        }
        break;

      case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      case AMDGPU::SI_KILL_I1_TERMINATOR:
        MadeChange = true;
        kill(MI);

        if (ExecBranchStack.empty()) {
          if (skipIfDead(MI, *NextBB)) {
            HaveSkipBlock = true;
            NextBB = std::next(BI);
            BE = MF.end();
          }
        } else {
          HaveKill = true;
        }

        MI.eraseFromParent();
        break;

      case AMDGPU::SI_RETURN_TO_EPILOG:
        // FIXME: Should move somewhere else.
        assert(!MF.getInfo<SIMachineFunctionInfo>()->returnsVoid());

        // Graphics shaders returning non-void shouldn't contain S_ENDPGM,
        // because external bytecode will be appended at the end.
        if (BI != --MF.end() || I != MBB.getFirstTerminator()) {
          // SI_RETURN_TO_EPILOG is not the last instruction. Add an empty
          // block at the end and jump there.
          if (!EmptyMBBAtEnd) {
            EmptyMBBAtEnd = MF.CreateMachineBasicBlock();
            MF.insert(MF.end(), EmptyMBBAtEnd);
          }

          MBB.addSuccessor(EmptyMBBAtEnd);
          BuildMI(*BI, I, MI.getDebugLoc(), TII->get(AMDGPU::S_BRANCH))
            .addMBB(EmptyMBBAtEnd);
          I->eraseFromParent();