1 //===-- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// \brief Insert wait instructions for memory reads and writes.
13 /// Memory reads and writes are issued asynchronously, so we need to insert
14 /// S_WAITCNT instructions when we want to access any of their results or
15 /// overwrite any register that's used asynchronously.
17 //===----------------------------------------------------------------------===//
20 #include "AMDGPUSubtarget.h"
21 #include "SIDefines.h"
22 #include "SIInstrInfo.h"
23 #include "SIMachineFunctionInfo.h"
24 #include "Utils/AMDGPUBaseInfo.h"
25 #include "llvm/ADT/PostOrderIterator.h"
26 #include "llvm/CodeGen/MachineFunction.h"
27 #include "llvm/CodeGen/MachineFunctionPass.h"
28 #include "llvm/CodeGen/MachineInstrBuilder.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #define DEBUG_TYPE "si-insert-waitcnts"
37 // Class of object that encapsulates the latest instruction counter score
38 // associated with the operand. Used for determining whether
39 // an s_waitcnt instruction needs to be emitted.
41 #define CNT_MASK(t) (1u << (t))
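// The hardware maintains three independent in-flight counters: VM_CNT for
// vector-memory accesses, LGKM_CNT for LDS/GDS/scalar-memory/message
// operations, and EXP_CNT for exports. An s_waitcnt stalls the wave until
// each counter has drained to the requested value.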
43 enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, NUM_INST_CNTS };
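// A half-open range [first, second) of slots in the per-register scoring
// tables below.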
45 typedef std::pair<signed, signed> RegInterval;
63 VMEM_ACCESS, // vector-memory read & write
64 LDS_ACCESS, // lds read & write
65 GDS_ACCESS, // gds read & write
66 SQ_MESSAGE, // send message
67 SMEM_ACCESS, // scalar-memory read & write
68 EXP_GPR_LOCK, // export holding on its data src
69 GDS_GPR_LOCK, // GDS holding on its data and addr src
70 EXP_POS_ACCESS, // write to export position
71 EXP_PARAM_ACCESS, // write to export parameter
72 VMW_GPR_LOCK, // vector-memory write holding on its data src
77 // 0 .. SQ_MAX_PGM_VGPRS-1 real VGPRs
78 // SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1 extra VGPR-like slots
79 // NUM_ALL_VGPRS .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
80 // We reserve a fixed number of VGPR slots in the scoring tables for
81 // special tokens like SCMEM_LDS (needed for buffer load to LDS).
82 enum RegisterMapping {
83 SQ_MAX_PGM_VGPRS = 256, // Maximum programmable VGPRs across all targets.
84 SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
85 NUM_EXTRA_VGPRS = 1, // A reserved slot for DS.
86 EXTRA_VGPR_LDS = 0, // This is a placeholder the Shader algorithm uses.
87 NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
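// Example: v3 scores live at table index 3, the LDS placeholder at index
// SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS = 256, and s5 at NUM_ALL_VGPRS + 5 = 262.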
90 #define ForAllWaitEventType(w) \
91 for (enum WaitEventType w = (enum WaitEventType)0; \
92 (w) < (enum WaitEventType)NUM_WAIT_EVENTS; \
93 (w) = (enum WaitEventType)((w) + 1))
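// Example use: "ForAllWaitEventType(W) { ... }" visits each WaitEventType
// exactly once; the merge logic below uses it to combine per-event upper
// bounds at block joins.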
95 // This is a per-basic-block object that maintains current score brackets
96 // of each wait-counter, and a per-register scoreboard for each wait-counter.
97 // We also maintain the latest score for every event type that can change the
98 // waitcnt in order to know if there are multiple types of events within
99 // the brackets. When multiple types of events happen in the bracket,
100 // the wait-count may get decremented out of order, so we need to put in an
101 // "s_waitcnt 0" before use.
102 class BlockWaitcntBrackets {
104 static int32_t getWaitCountMax(InstCounterType T) {
107 return HardwareLimits.VmcntMax;
109 return HardwareLimits.LgkmcntMax;
111 return HardwareLimits.ExpcntMax;
118 void setScoreLB(InstCounterType T, int32_t Val) {
119 assert(T < NUM_INST_CNTS);
120 if (T >= NUM_INST_CNTS)
125 void setScoreUB(InstCounterType T, int32_t Val) {
126 assert(T < NUM_INST_CNTS);
127 if (T >= NUM_INST_CNTS)
131 int32_t UB = (int)(ScoreUBs[T] - getWaitCountMax(EXP_CNT));
132 if (ScoreLBs[T] < UB)
137 int32_t getScoreLB(InstCounterType T) {
138 assert(T < NUM_INST_CNTS);
139 if (T >= NUM_INST_CNTS)
144 int32_t getScoreUB(InstCounterType T) {
145 assert(T < NUM_INST_CNTS);
146 if (T >= NUM_INST_CNTS)
151 // Mapping from event to counter.
152 InstCounterType eventCounter(WaitEventType E) {
165 case EXP_PARAM_ACCESS:
168 llvm_unreachable("unhandled event type");
170 return NUM_INST_CNTS;
173 void setRegScore(int GprNo, InstCounterType T, int32_t Val) {
174 if (GprNo < NUM_ALL_VGPRS) {
175 if (GprNo > VgprUB) {
178 VgprScores[T][GprNo] = Val;
180 assert(T == LGKM_CNT);
181 if (GprNo - NUM_ALL_VGPRS > SgprUB) {
182 SgprUB = GprNo - NUM_ALL_VGPRS;
184 SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
188 int32_t getRegScore(int GprNo, InstCounterType T) {
189 if (GprNo < NUM_ALL_VGPRS) {
190 return VgprScores[T][GprNo];
192 return SgprScores[GprNo - NUM_ALL_VGPRS];
196 memset(ScoreLBs, 0, sizeof(ScoreLBs));
197 memset(ScoreUBs, 0, sizeof(ScoreUBs));
198 memset(EventUBs, 0, sizeof(EventUBs));
199 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
200 T = (enum InstCounterType)(T + 1)) {
201 memset(VgprScores[T], 0, sizeof(VgprScores[T]));
203 memset(SgprScores, 0, sizeof(SgprScores));
206 RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
207 const MachineRegisterInfo *MRI,
208 const SIRegisterInfo *TRI, unsigned OpNo,
211 void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
212 const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
213 unsigned OpNo, int32_t Val);
215 void setWaitAtBeginning() { WaitAtBeginning = true; }
216 void clearWaitAtBeginning() { WaitAtBeginning = false; }
217 bool getWaitAtBeginning() const { return WaitAtBeginning; }
218 void setEventUB(enum WaitEventType W, int32_t Val) { EventUBs[W] = Val; }
219 int32_t getMaxVGPR() const { return VgprUB; }
220 int32_t getMaxSGPR() const { return SgprUB; }
221 int32_t getEventUB(enum WaitEventType W) const {
222 assert(W < NUM_WAIT_EVENTS);
225 bool counterOutOfOrder(InstCounterType T);
226 unsigned int updateByWait(InstCounterType T, int ScoreToWait);
227 void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
228 const MachineRegisterInfo *MRI, WaitEventType E,
231 BlockWaitcntBrackets()
232 : WaitAtBeginning(false), RevisitLoop(false), ValidLoop(false), MixedExpTypes(false),
233 LoopRegion(nullptr), PostOrder(0), Waitcnt(nullptr), VgprUB(0), SgprUB(0) {
234 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
235 T = (enum InstCounterType)(T + 1)) {
236 memset(VgprScores[T], 0, sizeof(VgprScores[T]));
239 ~BlockWaitcntBrackets() {}
241 bool hasPendingSMEM() const {
242 return (EventUBs[SMEM_ACCESS] > ScoreLBs[LGKM_CNT] &&
243 EventUBs[SMEM_ACCESS] <= ScoreUBs[LGKM_CNT]);
246 bool hasPendingFlat() const {
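// A flat operation is still pending if its recorded score lies inside the
// current bracket (LB, UB] of either counter.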
247 return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
248 LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
249 (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
250 LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
253 void setPendingFlat() {
254 LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
255 LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
258 int pendingFlat(InstCounterType Ct) const { return LastFlat[Ct]; }
260 void setLastFlat(InstCounterType Ct, int Val) { LastFlat[Ct] = Val; }
262 bool getRevisitLoop() const { return RevisitLoop; }
263 void setRevisitLoop(bool RevisitLoopIn) { RevisitLoop = RevisitLoopIn; }
265 void setPostOrder(int32_t PostOrderIn) { PostOrder = PostOrderIn; }
266 int32_t getPostOrder() const { return PostOrder; }
268 void setWaitcnt(MachineInstr *WaitcntIn) { Waitcnt = WaitcntIn; }
269 void clearWaitcnt() { Waitcnt = nullptr; }
270 MachineInstr *getWaitcnt() const { return Waitcnt; }
272 bool mixedExpTypes() const { return MixedExpTypes; }
273 void setMixedExpTypes(bool MixedExpTypesIn) {
274 MixedExpTypes = MixedExpTypesIn;
277 void print(raw_ostream &);
278 void dump() { print(dbgs()); }
281 bool WaitAtBeginning;
285 MachineLoop *LoopRegion;
287 MachineInstr *Waitcnt;
288 int32_t ScoreLBs[NUM_INST_CNTS] = {0};
289 int32_t ScoreUBs[NUM_INST_CNTS] = {0};
290 int32_t EventUBs[NUM_WAIT_EVENTS] = {0};
291 // Remember the last flat memory operation.
292 int32_t LastFlat[NUM_INST_CNTS] = {0};
293 // wait_cnt scores for every vgpr.
294 // Keep track of the VgprUB and SgprUB to make merge at join efficient.
297 int32_t VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS];
298 // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
299 int32_t SgprScores[SQ_MAX_PGM_SGPRS] = {0};
302 // This is a per-loop-region object that records the waitcnt status at the
303 // end of the loop footer from the previous iteration. We also maintain an
304 // iteration count to track the number of times the loop has been visited.
305 // When it doesn't converge naturally, we force convergence by inserting an
306 // s_waitcnt 0 at the end of the loop footer.
307 class LoopWaitcntData {
309 void incIterCnt() { IterCnt++; }
310 void resetIterCnt() { IterCnt = 0; }
311 int32_t getIterCnt() { return IterCnt; }
313 LoopWaitcntData() : LfWaitcnt(nullptr), IterCnt(0) {}
314 ~LoopWaitcntData() {}
316 void setWaitcnt(MachineInstr *WaitcntIn) { LfWaitcnt = WaitcntIn; }
317 MachineInstr *getWaitcnt() const { return LfWaitcnt; }
320 DEBUG(dbgs() << " iteration " << IterCnt << '\n';);
325 // s_waitcnt added at the end of the loop footer to stabilize the wait
326 // scores at the end of the loop footer.
327 MachineInstr *LfWaitcnt;
328 // Number of times the loop has been visited, not including the initial
329 // walk over.
333 class SIInsertWaitcnts : public MachineFunctionPass {
336 const SISubtarget *ST;
337 const SIInstrInfo *TII;
338 const SIRegisterInfo *TRI;
339 const MachineRegisterInfo *MRI;
340 const MachineLoopInfo *MLI;
341 AMDGPU::IsaInfo::IsaVersion IV;
344 DenseSet<MachineBasicBlock *> BlockVisitedSet;
345 DenseSet<MachineInstr *> CompilerGeneratedWaitcntSet;
346 DenseSet<MachineInstr *> VCCZBugHandledSet;
348 DenseMap<MachineBasicBlock *, std::unique_ptr<BlockWaitcntBrackets>>
349 BlockWaitcntBracketsMap;
351 DenseSet<MachineBasicBlock *> BlockWaitcntProcessedSet;
353 DenseMap<MachineLoop *, std::unique_ptr<LoopWaitcntData>> LoopWaitcntDataMap;
355 std::vector<std::unique_ptr<BlockWaitcntBrackets>> KillWaitBrackets;
361 : MachineFunctionPass(ID), ST(nullptr), TII(nullptr), TRI(nullptr),
362 MRI(nullptr), MLI(nullptr) {}
364 bool runOnMachineFunction(MachineFunction &MF) override;
366 StringRef getPassName() const override {
367 return "SI insert wait instructions";
370 void getAnalysisUsage(AnalysisUsage &AU) const override {
371 AU.setPreservesCFG();
372 AU.addRequired<MachineLoopInfo>();
373 MachineFunctionPass::getAnalysisUsage(AU);
376 void addKillWaitBracket(BlockWaitcntBrackets *Bracket) {
377 // The waitcnt information is copied because it changes as the block is
379 KillWaitBrackets.push_back(make_unique<BlockWaitcntBrackets>(*Bracket));
382 MachineInstr *generateSWaitCntInstBefore(MachineInstr &MI,
383 BlockWaitcntBrackets *ScoreBrackets);
384 void updateEventWaitCntAfter(MachineInstr &Inst,
385 BlockWaitcntBrackets *ScoreBrackets);
386 void mergeInputScoreBrackets(MachineBasicBlock &Block);
387 MachineBasicBlock *loopBottom(const MachineLoop *Loop);
388 void insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block);
389 void insertWaitcntBeforeCF(MachineBasicBlock &Block, MachineInstr *Inst);
392 } // End anonymous namespace.
394 RegInterval BlockWaitcntBrackets::getRegInterval(const MachineInstr *MI,
395 const SIInstrInfo *TII,
396 const MachineRegisterInfo *MRI,
397 const SIRegisterInfo *TRI,
400 const MachineOperand &Op = MI->getOperand(OpNo);
401 if (!Op.isReg() || !TRI->isInAllocatableClass(Op.getReg()) ||
402 (Def && !Op.isDef()))
405 // A use via a partial-write (PW) operand does not need a waitcnt.
406 // A partial write is not a WAW hazard.
407 assert(!Op.getSubReg() || !Op.isUndef());
410 const MachineRegisterInfo &MRIA = *MRI;
412 unsigned Reg = TRI->getEncodingValue(Op.getReg());
414 if (TRI->isVGPR(MRIA, Op.getReg())) {
415 assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
416 Result.first = Reg - RegisterEncoding.VGPR0;
417 assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
418 } else if (TRI->isSGPRReg(MRIA, Op.getReg())) {
419 assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
420 Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
421 assert(Result.first >= NUM_ALL_VGPRS &&
422 Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
425 // else if (TRI->isTTMP(MRIA, Reg.getReg())) ...
429 const MachineInstr &MIA = *MI;
430 const TargetRegisterClass *RC = TII->getOpRegClass(MIA, OpNo);
431 unsigned Size = TRI->getRegSizeInBits(*RC);
432 Result.second = Result.first + (Size / 32);
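// E.g. a 64-bit operand starting at v4 yields the half-open interval [4, 6).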
437 void BlockWaitcntBrackets::setExpScore(const MachineInstr *MI,
438 const SIInstrInfo *TII,
439 const SIRegisterInfo *TRI,
440 const MachineRegisterInfo *MRI,
441 unsigned OpNo, int32_t Val) {
442 RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo, false);
444 const MachineOperand &Opnd = MI->getOperand(OpNo);
445 assert(TRI->isVGPR(*MRI, Opnd.getReg()));
447 for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
448 setRegScore(RegNo, EXP_CNT, Val);
452 void BlockWaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
453 const SIRegisterInfo *TRI,
454 const MachineRegisterInfo *MRI,
455 WaitEventType E, MachineInstr &Inst) {
456 const MachineRegisterInfo &MRIA = *MRI;
457 InstCounterType T = eventCounter(E);
458 int32_t CurrScore = getScoreUB(T) + 1;
459 // EventUB and ScoreUB need to be updated regardless of whether this event
460 // changes the score of a register or not.
461 // Examples include vm_cnt for a buffer store or lgkm_cnt for a send-message.
462 EventUBs[E] = CurrScore;
463 setScoreUB(T, CurrScore);
466 // Check for mixed export types. If they are mixed, then a waitcnt exp(0)
467 // is required.
468 if (!MixedExpTypes) {
469 MixedExpTypes = counterOutOfOrder(EXP_CNT);
472 // Put score on the source vgprs. If this is a store, just use those
473 // specific register(s).
474 if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
475 // All GDS operations must protect their address register (same as
476 // export).
477 if (Inst.getOpcode() != AMDGPU::DS_APPEND &&
478 Inst.getOpcode() != AMDGPU::DS_CONSUME) {
480 &Inst, TII, TRI, MRI,
481 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr),
484 if (Inst.mayStore()) {
486 &Inst, TII, TRI, MRI,
487 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0),
489 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
490 AMDGPU::OpName::data1) != -1) {
491 setExpScore(&Inst, TII, TRI, MRI,
492 AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
493 AMDGPU::OpName::data1),
496 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1 &&
497 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
498 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
499 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
500 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
501 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
502 Inst.getOpcode() != AMDGPU::DS_APPEND &&
503 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
504 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
505 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
506 const MachineOperand &Op = Inst.getOperand(I);
507 if (Op.isReg() && !Op.isDef() && TRI->isVGPR(MRIA, Op.getReg())) {
508 setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
512 } else if (TII->isFLAT(Inst)) {
513 if (Inst.mayStore()) {
515 &Inst, TII, TRI, MRI,
516 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
518 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
520 &Inst, TII, TRI, MRI,
521 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
524 } else if (TII->isMIMG(Inst)) {
525 if (Inst.mayStore()) {
526 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
527 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
529 &Inst, TII, TRI, MRI,
530 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
533 } else if (TII->isMTBUF(Inst)) {
534 if (Inst.mayStore()) {
535 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
537 } else if (TII->isMUBUF(Inst)) {
538 if (Inst.mayStore()) {
539 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
540 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
542 &Inst, TII, TRI, MRI,
543 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
547 if (TII->isEXP(Inst)) {
548 // For export the destination registers are really temps that
549 // can be used as the actual source after export patching, so
550 // we need to treat them like sources and set the EXP_CNT
551 // score.
552 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
553 MachineOperand &DefMO = Inst.getOperand(I);
554 if (DefMO.isReg() && DefMO.isDef() &&
555 TRI->isVGPR(MRIA, DefMO.getReg())) {
556 setRegScore(TRI->getEncodingValue(DefMO.getReg()), EXP_CNT,
561 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
562 MachineOperand &MO = Inst.getOperand(I);
563 if (MO.isReg() && !MO.isDef() && TRI->isVGPR(MRIA, MO.getReg())) {
564 setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
568 #if 0 // TODO: check if this is handled by MUBUF code above.
569 } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
570 Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
571 Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
572 MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
573 unsigned OpNo;//TODO: find the OpNo for this operand;
574 RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo, false);
575 for (signed RegNo = Interval.first; RegNo < Interval.second;
577 setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
581 // Match the score to the destination registers.
582 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
583 RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I, true);
584 if (T == VM_CNT && Interval.first >= NUM_ALL_VGPRS)
586 for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
587 setRegScore(RegNo, T, CurrScore);
590 if (TII->isDS(Inst) && Inst.mayStore()) {
591 setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
596 void BlockWaitcntBrackets::print(raw_ostream &OS) {
598 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
599 T = (enum InstCounterType)(T + 1)) {
600 int LB = getScoreLB(T);
601 int UB = getScoreUB(T);
605 OS << " VM_CNT(" << UB - LB << "): ";
608 OS << " LGKM_CNT(" << UB - LB << "): ";
611 OS << " EXP_CNT(" << UB - LB << "): ";
614 OS << " UNKNOWN(" << UB - LB << "): ";
619 // Print vgpr scores.
620 for (int J = 0; J <= getMaxVGPR(); J++) {
621 int RegScore = getRegScore(J, T);
624 int RelScore = RegScore - LB - 1;
625 if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
626 OS << RelScore << ":v" << J << " ";
628 OS << RelScore << ":ds ";
631 // Also need to print sgpr scores for lgkm_cnt.
633 for (int J = 0; J <= getMaxSGPR(); J++) {
634 int RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
637 int RelScore = RegScore - LB - 1;
638 OS << RelScore << ":s" << J << " ";
648 unsigned int BlockWaitcntBrackets::updateByWait(InstCounterType T,
650 unsigned int NeedWait = 0;
651 if (ScoreToWait == -1) {
652 // The score to wait is unknown. This implies that it was not encountered
653 // during the path of the CFG walk done during the current traversal but
654 // may be seen on a different path. Emit an s_waitcnt with a
655 // conservative value of 0 for the counter.
656 NeedWait = CNT_MASK(T);
657 setScoreLB(T, getScoreUB(T));
661 // If the score of src_operand falls within the bracket, we need an
662 // s_waitcnt instruction.
663 const int32_t LB = getScoreLB(T);
664 const int32_t UB = getScoreUB(T);
665 if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
666 if (T == VM_CNT && hasPendingFlat()) {
667 // If there is a pending FLAT operation, and this is a VM waitcnt,
668 // then we need to force a waitcnt 0 for VM.
669 NeedWait = CNT_MASK(T);
670 setScoreLB(T, getScoreUB(T));
671 } else if (counterOutOfOrder(T)) {
672 // Counter can get decremented out-of-order when there
673 // are multiple types of event in the bracket. Also emit an s_waitcnt
674 // with a conservative value of 0 for the counter.
675 NeedWait = CNT_MASK(T);
676 setScoreLB(T, getScoreUB(T));
678 NeedWait = CNT_MASK(T);
679 setScoreLB(T, ScoreToWait);
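// In-order case: raising the lower bound only to ScoreToWait means the
// wait value computed later (UB - LB) still lets UB - ScoreToWait newer
// operations remain outstanding.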
686 // Where there are multiple types of event in the bracket of a counter,
687 // the decrement may go out of order.
688 bool BlockWaitcntBrackets::counterOutOfOrder(InstCounterType T) {
693 if (EventUBs[SMEM_ACCESS] > ScoreLBs[LGKM_CNT] &&
694 EventUBs[SMEM_ACCESS] <= ScoreUBs[LGKM_CNT]) {
695 // Scalar memory reads can always go out of order.
698 int NumEventTypes = 0;
699 if (EventUBs[LDS_ACCESS] > ScoreLBs[LGKM_CNT] &&
700 EventUBs[LDS_ACCESS] <= ScoreUBs[LGKM_CNT]) {
703 if (EventUBs[GDS_ACCESS] > ScoreLBs[LGKM_CNT] &&
704 EventUBs[GDS_ACCESS] <= ScoreUBs[LGKM_CNT]) {
707 if (EventUBs[SQ_MESSAGE] > ScoreLBs[LGKM_CNT] &&
708 EventUBs[SQ_MESSAGE] <= ScoreUBs[LGKM_CNT]) {
711 if (NumEventTypes <= 1) {
717 // If there has been a mixture of export types, then a waitcnt exp(0) is
718 // needed.
721 int NumEventTypes = 0;
722 if (EventUBs[EXP_GPR_LOCK] > ScoreLBs[EXP_CNT] &&
723 EventUBs[EXP_GPR_LOCK] <= ScoreUBs[EXP_CNT]) {
726 if (EventUBs[GDS_GPR_LOCK] > ScoreLBs[EXP_CNT] &&
727 EventUBs[GDS_GPR_LOCK] <= ScoreUBs[EXP_CNT]) {
730 if (EventUBs[VMW_GPR_LOCK] > ScoreLBs[EXP_CNT] &&
731 EventUBs[VMW_GPR_LOCK] <= ScoreUBs[EXP_CNT]) {
734 if (EventUBs[EXP_PARAM_ACCESS] > ScoreLBs[EXP_CNT] &&
735 EventUBs[EXP_PARAM_ACCESS] <= ScoreUBs[EXP_CNT]) {
739 if (EventUBs[EXP_POS_ACCESS] > ScoreLBs[EXP_CNT] &&
740 EventUBs[EXP_POS_ACCESS] <= ScoreUBs[EXP_CNT]) {
744 if (NumEventTypes <= 1) {
755 INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
757 INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
760 char SIInsertWaitcnts::ID = 0;
762 char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;
764 FunctionPass *llvm::createSIInsertWaitcntsPass() {
765 return new SIInsertWaitcnts();
768 static bool readsVCCZ(const MachineInstr &MI) {
769 unsigned Opc = MI.getOpcode();
770 return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
771 !MI.getOperand(1).isUndef();
774 /// \brief Generate s_waitcnt instruction to be placed before cur_Inst.
775 /// Instructions of a given type are returned in order,
776 /// but instructions of different types can complete out of order.
777 /// We rely on this in-order completion
778 /// and simply assign a score to the memory access instructions.
779 /// We keep track of the active "score bracket" to determine
780 /// if an access of a memory read requires an s_waitcnt
781 /// and if so what the value of each counter is.
782 /// The "score bracket" is bound by the lower bound and upper bound
783 /// scores (*_score_LB and *_score_ub respectively).
784 MachineInstr *SIInsertWaitcnts::generateSWaitCntInstBefore(
785 MachineInstr &MI, BlockWaitcntBrackets *ScoreBrackets) {
786 // To emit, or not to emit - that's the question!
787 // Start with an assumption that there is no need to emit.
788 unsigned int EmitSwaitcnt = 0;
789 // s_waitcnt instruction to return; default is NULL.
790 MachineInstr *SWaitInst = nullptr;
791 // No need to wait before phi. If a phi-move exists, then the wait should
792 // have been inserted before the move. If a phi-move does not exist, then the
793 // wait should be inserted before the real use. The same is true for
794 // sc-merge. It is not a coincidence that all these cases correspond to the
795 // instructions that are skipped in the assembling loop.
796 bool NeedLineMapping = false; // TODO: Check on this.
797 if (MI.isDebugValue() &&
798 // TODO: any other opcode?
799 !NeedLineMapping) {
800 return SWaitInst;
801 }
803 // See if an s_waitcnt is forced at block entry, or is needed at
804 // program end.
805 if (ScoreBrackets->getWaitAtBeginning()) {
806 // Note that we have already cleared the state, so we don't need to update
807 // it.
808 ScoreBrackets->clearWaitAtBeginning();
809 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
810 T = (enum InstCounterType)(T + 1)) {
811 EmitSwaitcnt |= CNT_MASK(T);
812 ScoreBrackets->setScoreLB(T, ScoreBrackets->getScoreUB(T));
816 // See if this instruction has a forced S_WAITCNT VM.
817 // TODO: Handle other cases of NeedsWaitcntVmBefore()
818 else if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
819 MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
820 MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL) {
822 ScoreBrackets->updateByWait(VM_CNT, ScoreBrackets->getScoreUB(VM_CNT));
825 // All waits must be resolved at call return.
826 // NOTE: this could be improved with knowledge of all call sites or
827 // with knowledge of the called routines.
828 if (MI.getOpcode() == AMDGPU::RETURN ||
829 MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
830 MI.getOpcode() == AMDGPU::S_SETPC_B64_return) {
831 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
832 T = (enum InstCounterType)(T + 1)) {
833 if (ScoreBrackets->getScoreUB(T) > ScoreBrackets->getScoreLB(T)) {
834 ScoreBrackets->setScoreLB(T, ScoreBrackets->getScoreUB(T));
835 EmitSwaitcnt |= CNT_MASK(T);
839 // Resolve vm waits before gs-done.
840 else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
841 MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
842 ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
843 AMDGPU::SendMsg::ID_GS_DONE)) {
844 if (ScoreBrackets->getScoreUB(VM_CNT) > ScoreBrackets->getScoreLB(VM_CNT)) {
845 ScoreBrackets->setScoreLB(VM_CNT, ScoreBrackets->getScoreUB(VM_CNT));
846 EmitSwaitcnt |= CNT_MASK(VM_CNT);
849 #if 0 // TODO: enable the following blocks of logic when we have fence.
850 else if (MI.getOpcode() == SC_FENCE) {
851 const unsigned int group_size =
852 context->shader_info->GetMaxThreadGroupSize();
853 // group_size == 0 means thread group size is unknown at compile time
854 const bool group_is_multi_wave =
855 (group_size == 0 || group_size > target_info->GetWaveFrontSize());
856 const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();
858 for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
859 SCRegType src_type = Inst->GetSrcType(i);
862 if (group_is_multi_wave ||
863 context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
864 EmitSwaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
865 ScoreBrackets->getScoreUB(LGKM_CNT));
866 // LDS may have to wait for VM_CNT after buffer load to LDS
867 if (target_info->HasBufferLoadToLDS()) {
868 EmitSwaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
869 ScoreBrackets->getScoreUB(VM_CNT));
875 if (group_is_multi_wave || fence_is_global) {
876 EmitSwaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
877 ScoreBrackets->getScoreUB(EXP_CNT));
878 EmitSwaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
879 ScoreBrackets->getScoreUB(LGKM_CNT));
887 if (group_is_multi_wave || fence_is_global) {
888 EmitSwaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
889 ScoreBrackets->getScoreUB(EXP_CNT));
890 EmitSwaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
891 ScoreBrackets->getScoreUB(VM_CNT));
903 // Export & GDS instructions do not read the EXEC mask until after the export
904 // is granted (which can occur well after the instruction is issued).
905 // The shader program must flush all EXP operations on the export-count
906 // before overwriting the EXEC mask.
908 if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
909 // Export and GDS are tracked individually; either may trigger a waitcnt.
911 EmitSwaitcnt |= ScoreBrackets->updateByWait(
912 EXP_CNT, ScoreBrackets->getEventUB(EXP_GPR_LOCK));
913 EmitSwaitcnt |= ScoreBrackets->updateByWait(
914 EXP_CNT, ScoreBrackets->getEventUB(EXP_PARAM_ACCESS));
915 EmitSwaitcnt |= ScoreBrackets->updateByWait(
916 EXP_CNT, ScoreBrackets->getEventUB(EXP_POS_ACCESS));
917 EmitSwaitcnt |= ScoreBrackets->updateByWait(
918 EXP_CNT, ScoreBrackets->getEventUB(GDS_GPR_LOCK));
921 #if 0 // TODO: the following code to handle CALL.
922 // The argument passing for CALLs should suffice for VM_CNT and LGKM_CNT.
923 // However, there is a problem with EXP_CNT, because the call cannot
924 // easily tell if a register is used in the function, and if it did, then
925 // the referring instruction would have to have an S_WAITCNT, which is
926 // dependent on all call sites. So instead, force S_WAITCNT for EXP_CNTs
928 if (MI.getOpcode() == SC_CALL) {
929 if (ScoreBrackets->getScoreUB(EXP_CNT) >
930 ScoreBrackets->getScoreLB(EXP_CNT)) {
931 ScoreBrackets->setScoreLB(EXP_CNT, ScoreBrackets->getScoreUB(EXP_CNT));
932 EmitSwaitcnt |= CNT_MASK(EXP_CNT);
937 // Look at the source operands of every instruction to see if
938 // any of them results from a previous memory operation that affects
939 // its current usage. If so, an s_waitcnt instruction needs to be
940 // emitted.
941 // If the source operand was defined by a load, add the s_waitcnt
942 // instruction to guarantee the right RAW order.
943 for (const MachineMemOperand *Memop : MI.memoperands()) {
944 unsigned AS = Memop->getAddrSpace();
945 if (AS != AMDGPUASI.LOCAL_ADDRESS)
947 unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
948 // VM_CNT is only relevant to vgpr or LDS.
949 EmitSwaitcnt |= ScoreBrackets->updateByWait(
950 VM_CNT, ScoreBrackets->getRegScore(RegNo, VM_CNT));
952 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
953 const MachineOperand &Op = MI.getOperand(I);
954 const MachineRegisterInfo &MRIA = *MRI;
955 RegInterval Interval =
956 ScoreBrackets->getRegInterval(&MI, TII, MRI, TRI, I, false);
957 for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
958 if (TRI->isVGPR(MRIA, Op.getReg())) {
959 // VM_CNT is only relevant to vgpr or LDS.
960 EmitSwaitcnt |= ScoreBrackets->updateByWait(
961 VM_CNT, ScoreBrackets->getRegScore(RegNo, VM_CNT));
963 EmitSwaitcnt |= ScoreBrackets->updateByWait(
964 LGKM_CNT, ScoreBrackets->getRegScore(RegNo, LGKM_CNT));
967 // End of for loop that looks at all source operands to decide vm_wait_cnt
970 // Two cases are handled for destination operands:
971 // 1) If the destination operand was defined by a load, add the s_waitcnt
972 // instruction to guarantee the right WAW order.
973 // 2) If a destination operand was used by a recent export/store instruction,
974 // add s_waitcnt on exp_cnt to guarantee the WAR order.
976 for (const MachineMemOperand *Memop : MI.memoperands()) {
977 unsigned AS = Memop->getAddrSpace();
978 if (AS != AMDGPUASI.LOCAL_ADDRESS)
980 unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
981 EmitSwaitcnt |= ScoreBrackets->updateByWait(
982 VM_CNT, ScoreBrackets->getRegScore(RegNo, VM_CNT));
983 EmitSwaitcnt |= ScoreBrackets->updateByWait(
984 EXP_CNT, ScoreBrackets->getRegScore(RegNo, EXP_CNT));
987 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
988 MachineOperand &Def = MI.getOperand(I);
989 const MachineRegisterInfo &MRIA = *MRI;
990 RegInterval Interval =
991 ScoreBrackets->getRegInterval(&MI, TII, MRI, TRI, I, true);
992 for (signed RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
993 if (TRI->isVGPR(MRIA, Def.getReg())) {
994 EmitSwaitcnt |= ScoreBrackets->updateByWait(
995 VM_CNT, ScoreBrackets->getRegScore(RegNo, VM_CNT));
996 EmitSwaitcnt |= ScoreBrackets->updateByWait(
997 EXP_CNT, ScoreBrackets->getRegScore(RegNo, EXP_CNT));
999 EmitSwaitcnt |= ScoreBrackets->updateByWait(
1000 LGKM_CNT, ScoreBrackets->getRegScore(RegNo, LGKM_CNT));
1002 } // End of for loop that looks at all dest operands.
1005 // TODO: Tie force zero to a compiler triage option.
1006 bool ForceZero = false;
1008 // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
1009 // occurs before the instruction. Doing it here prevents any additional
1010 // S_WAITCNTs from being emitted if the instruction was marked as
1011 // requiring a WAITCNT beforehand.
1012 if (MI.getOpcode() == AMDGPU::S_BARRIER &&
1013 !ST->hasAutoWaitcntBeforeBarrier()) {
1015 ScoreBrackets->updateByWait(VM_CNT, ScoreBrackets->getScoreUB(VM_CNT));
1016 EmitSwaitcnt |= ScoreBrackets->updateByWait(
1017 EXP_CNT, ScoreBrackets->getScoreUB(EXP_CNT));
1018 EmitSwaitcnt |= ScoreBrackets->updateByWait(
1019 LGKM_CNT, ScoreBrackets->getScoreUB(LGKM_CNT));
1022 // TODO: Remove this work-around, enable the assert for Bug 457939
1023 // after fixing the scheduler. Also, the Shader Compiler code is
1024 // independent of target.
1025 if (readsVCCZ(MI) && ST->getGeneration() <= SISubtarget::SEA_ISLANDS) {
1026 if (ScoreBrackets->getScoreLB(LGKM_CNT) <
1027 ScoreBrackets->getScoreUB(LGKM_CNT) &&
1028 ScoreBrackets->hasPendingSMEM()) {
1029 // Wait on everything, not just LGKM. vccz reads usually come from
1030 // terminators, and we always wait on everything at the end of the
1031 // block, so if we only wait on LGKM here, we might end up with
1032 // another s_waitcnt inserted right after this if there are non-LGKM
1033 // instructions still outstanding.
1035 EmitSwaitcnt = true;
1039 // Does this operand processing indicate s_wait counter update?
1041 int CntVal[NUM_INST_CNTS];
1043 bool UseDefaultWaitcntStrategy = true;
1045 // Force all waitcnts to 0.
1046 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
1047 T = (enum InstCounterType)(T + 1)) {
1048 ScoreBrackets->setScoreLB(T, ScoreBrackets->getScoreUB(T));
1051 CntVal[EXP_CNT] = 0;
1052 CntVal[LGKM_CNT] = 0;
1053 UseDefaultWaitcntStrategy = false;
1056 if (UseDefaultWaitcntStrategy) {
1057 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
1058 T = (enum InstCounterType)(T + 1)) {
1059 if (EmitSwaitcnt & CNT_MASK(T)) {
1061 ScoreBrackets->getScoreUB(T) - ScoreBrackets->getScoreLB(T);
1062 int MaxDelta = ScoreBrackets->getWaitCountMax(T);
1063 if (Delta >= MaxDelta) {
1066 ScoreBrackets->setScoreLB(
1067 T, ScoreBrackets->getScoreUB(T) - MaxDelta);
1069 EmitSwaitcnt &= ~CNT_MASK(T);
1073 // If we are not waiting for a particular counter then encode
1074 // it as -1 which means "don't care."
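// Once masked to the field width by the encode step, -1 becomes the
// counter's all-ones maximum, which the hardware treats as "no wait".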
1080 // If we are not waiting on any counter we can skip the wait altogether.
1081 if (EmitSwaitcnt != 0) {
1082 MachineInstr *OldWaitcnt = ScoreBrackets->getWaitcnt();
1083 int Imm = (!OldWaitcnt) ? 0 : OldWaitcnt->getOperand(0).getImm();
1084 if (!OldWaitcnt || (AMDGPU::decodeVmcnt(IV, Imm) !=
1085 (CntVal[VM_CNT] & AMDGPU::getVmcntBitMask(IV))) ||
1086 (AMDGPU::decodeExpcnt(IV, Imm) !=
1087 (CntVal[EXP_CNT] & AMDGPU::getExpcntBitMask(IV))) ||
1088 (AMDGPU::decodeLgkmcnt(IV, Imm) !=
1089 (CntVal[LGKM_CNT] & AMDGPU::getLgkmcntBitMask(IV)))) {
1090 MachineLoop *ContainingLoop = MLI->getLoopFor(MI.getParent());
1091 if (ContainingLoop) {
1092 MachineBasicBlock *TBB = ContainingLoop->getHeader();
1093 BlockWaitcntBrackets *ScoreBracket =
1094 BlockWaitcntBracketsMap[TBB].get();
1095 if (!ScoreBracket) {
1096 assert(BlockVisitedSet.find(TBB) == BlockVisitedSet.end());
1097 BlockWaitcntBracketsMap[TBB] = make_unique<BlockWaitcntBrackets>();
1098 ScoreBracket = BlockWaitcntBracketsMap[TBB].get();
1100 ScoreBracket->setRevisitLoop(true);
1101 DEBUG(dbgs() << "set-revisit: block"
1102 << ContainingLoop->getHeader()->getNumber() << '\n';);
1106 // Update an existing waitcount, or make a new one.
1107 MachineFunction &MF = *MI.getParent()->getParent();
1108 if (OldWaitcnt && OldWaitcnt->getOpcode() != AMDGPU::S_WAITCNT) {
1109 SWaitInst = OldWaitcnt;
1111 SWaitInst = MF.CreateMachineInstr(TII->get(AMDGPU::S_WAITCNT),
1113 CompilerGeneratedWaitcntSet.insert(SWaitInst);
1116 const MachineOperand &Op =
1117 MachineOperand::CreateImm(AMDGPU::encodeWaitcnt(
1118 IV, CntVal[VM_CNT], CntVal[EXP_CNT], CntVal[LGKM_CNT]));
1119 SWaitInst->addOperand(MF, Op);
1121 if (CntVal[EXP_CNT] == 0) {
1122 ScoreBrackets->setMixedExpTypes(false);
1130 void SIInsertWaitcnts::insertWaitcntBeforeCF(MachineBasicBlock &MBB,
1131 MachineInstr *Waitcnt) {
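// Place the waitcnt before a terminating branch so that it executes on
// every path out of the block; otherwise it is simply appended.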
1133 MBB.push_back(Waitcnt);
1137 MachineBasicBlock::iterator It = MBB.end();
1138 MachineInstr *MI = &*(--It);
1139 if (MI->isBranch()) {
1140 MBB.insert(It, Waitcnt);
1142 MBB.push_back(Waitcnt);
1148 void SIInsertWaitcnts::updateEventWaitCntAfter(
1149 MachineInstr &Inst, BlockWaitcntBrackets *ScoreBrackets) {
1150 // Now look at the instruction opcode. If it is a memory access
1151 // instruction, update the upper-bound of the appropriate counter's
1152 // bracket and the destination operand scores.
1153 // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
1154 uint64_t TSFlags = Inst.getDesc().TSFlags;
1155 if (TII->isDS(Inst) && (TSFlags & SIInstrFlags::LGKM_CNT)) {
1156 if (TII->getNamedOperand(Inst, AMDGPU::OpName::gds) &&
1157 TII->getNamedOperand(Inst, AMDGPU::OpName::gds)->getImm() != 0) {
1158 ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
1159 ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
1161 ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1163 } else if (TII->isFLAT(Inst)) {
1164 assert(Inst.mayLoad() || Inst.mayStore());
1165 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
1166 ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1168 // This is a flat memory operation. Check to see if it has memory
1169 // tokens for both LDS and Memory, and if so mark it as a flat.
1170 bool FoundLDSMem = false;
1171 for (const MachineMemOperand *Memop : Inst.memoperands()) {
1172 unsigned AS = Memop->getAddrSpace();
1173 if (AS == AMDGPUASI.LOCAL_ADDRESS || AS == AMDGPUASI.FLAT_ADDRESS)
1177 // This is a flat memory operation, so note it - it will require
1178 // that both the VM and LGKM be flushed to zero if it is pending when
1179 // a VM or LGKM dependency occurs.
1181 ScoreBrackets->setPendingFlat();
1183 } else if (SIInstrInfo::isVMEM(Inst) &&
1184 // TODO: get a better carve out.
1185 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
1186 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
1187 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL) {
1188 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
1189 if ( // TODO: assumed yes -- target_info->MemWriteNeedsExpWait() &&
1190 (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
1191 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
1193 } else if (TII->isSMRD(Inst)) {
1194 ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1196 switch (Inst.getOpcode()) {
1197 case AMDGPU::S_SENDMSG:
1198 case AMDGPU::S_SENDMSGHALT:
1199 ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
1202 case AMDGPU::EXP_DONE: {
1203 int Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
1204 if (Imm >= 32 && Imm <= 63)
1205 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
1206 else if (Imm >= 12 && Imm <= 15)
1207 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
1209 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
1212 case AMDGPU::S_MEMTIME:
1213 case AMDGPU::S_MEMREALTIME:
1214 ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1222 void SIInsertWaitcnts::mergeInputScoreBrackets(MachineBasicBlock &Block) {
1223 BlockWaitcntBrackets *ScoreBrackets = BlockWaitcntBracketsMap[&Block].get();
1224 int32_t MaxPending[NUM_INST_CNTS] = {0};
1225 int32_t MaxFlat[NUM_INST_CNTS] = {0};
1226 bool MixedExpTypes = false;
1228 // Clear the score bracket state.
1229 ScoreBrackets->clear();
1231 // Compute the number of pending elements on block entry.
1233 // IMPORTANT NOTE: If iterative handling of loops is added, the code will
1234 // need to handle single BBs with backedges to themselves. This means that
1235 // they will need to retain and not clear their initial state.
1237 // See if there are any uninitialized predecessors. If so, emit an
1238 // s_waitcnt 0 at the beginning of the block.
1239 for (MachineBasicBlock *pred : Block.predecessors()) {
1240 BlockWaitcntBrackets *PredScoreBrackets =
1241 BlockWaitcntBracketsMap[pred].get();
1242 bool Visited = BlockVisitedSet.find(pred) != BlockVisitedSet.end();
1243 if (!Visited || PredScoreBrackets->getWaitAtBeginning()) {
1246 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
1247 T = (enum InstCounterType)(T + 1)) {
1249 PredScoreBrackets->getScoreUB(T) - PredScoreBrackets->getScoreLB(T);
1250 MaxPending[T] = std::max(MaxPending[T], span);
1252 PredScoreBrackets->pendingFlat(T) - PredScoreBrackets->getScoreLB(T);
1253 MaxFlat[T] = std::max(MaxFlat[T], span);
1256 MixedExpTypes |= PredScoreBrackets->mixedExpTypes();
1259 // TODO: Is SC Block->IsMainExit() same as Block.succ_empty()?
1260 // Also handle kills for exit block.
1261 if (Block.succ_empty() && !KillWaitBrackets.empty()) {
1262 for (unsigned int I = 0; I < KillWaitBrackets.size(); I++) {
1263 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
1264 T = (enum InstCounterType)(T + 1)) {
1265 int Span = KillWaitBrackets[I]->getScoreUB(T) -
1266 KillWaitBrackets[I]->getScoreLB(T);
1267 MaxPending[T] = std::max(MaxPending[T], Span);
1268 Span = KillWaitBrackets[I]->pendingFlat(T) -
1269 KillWaitBrackets[I]->getScoreLB(T);
1270 MaxFlat[T] = std::max(MaxFlat[T], Span);
1273 MixedExpTypes |= KillWaitBrackets[I]->mixedExpTypes();
1277 // Special handling for GDS_GPR_LOCK and EXP_GPR_LOCK.
1278 for (MachineBasicBlock *Pred : Block.predecessors()) {
1279 BlockWaitcntBrackets *PredScoreBrackets =
1280 BlockWaitcntBracketsMap[Pred].get();
1281 bool Visited = BlockVisitedSet.find(Pred) != BlockVisitedSet.end();
1282 if (!Visited || PredScoreBrackets->getWaitAtBeginning()) {
1286 int GDSSpan = PredScoreBrackets->getEventUB(GDS_GPR_LOCK) -
1287 PredScoreBrackets->getScoreLB(EXP_CNT);
1288 MaxPending[EXP_CNT] = std::max(MaxPending[EXP_CNT], GDSSpan);
1289 int EXPSpan = PredScoreBrackets->getEventUB(EXP_GPR_LOCK) -
1290 PredScoreBrackets->getScoreLB(EXP_CNT);
1291 MaxPending[EXP_CNT] = std::max(MaxPending[EXP_CNT], EXPSpan);
1294 // TODO: Is SC Block->IsMainExit() same as Block.succ_empty()?
1295 if (Block.succ_empty() && !KillWaitBrackets.empty()) {
1296 for (unsigned int I = 0; I < KillWaitBrackets.size(); I++) {
1297 int GDSSpan = KillWaitBrackets[I]->getEventUB(GDS_GPR_LOCK) -
1298 KillWaitBrackets[I]->getScoreLB(EXP_CNT);
1299 MaxPending[EXP_CNT] = std::max(MaxPending[EXP_CNT], GDSSpan);
1300 int EXPSpan = KillWaitBrackets[I]->getEventUB(EXP_GPR_LOCK) -
1301 KillWaitBrackets[I]->getScoreLB(EXP_CNT);
1302 MaxPending[EXP_CNT] = std::max(MaxPending[EXP_CNT], EXPSpan);
1307 // Unlike SC, LC does not add a waitcnt at the beginning. Leaving this as a marker.
1308 // TODO: how does LC distinguish between function entry and main entry?
1309 // If this is the entry to a function, force a wait.
1310 MachineBasicBlock &Entry = Block.getParent()->front();
1311 if (Entry.getNumber() == Block.getNumber()) {
1312 ScoreBrackets->setWaitAtBeginning();
1317 // Now set the current Block's brackets to the largest ending bracket.
1318 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
1319 T = (enum InstCounterType)(T + 1)) {
1320 ScoreBrackets->setScoreUB(T, MaxPending[T]);
1321 ScoreBrackets->setScoreLB(T, 0);
1322 ScoreBrackets->setLastFlat(T, MaxFlat[T]);
1325 ScoreBrackets->setMixedExpTypes(MixedExpTypes);
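// Each predecessor's scores are relative to its own bracket, so below they
// are rebased by (MaxPending[T] - PredUB) to fit the merged bracket
// [0, MaxPending[T]]. E.g. a register score of 6 from a predecessor with
// UB = 7 merging into MaxPending = 9 becomes 8.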
1327 // Set the register scoreboard.
1328 for (MachineBasicBlock *Pred : Block.predecessors()) {
1329 if (BlockVisitedSet.find(Pred) == BlockVisitedSet.end()) {
1333 BlockWaitcntBrackets *PredScoreBrackets =
1334 BlockWaitcntBracketsMap[Pred].get();
1336 // Now merge the gpr_reg_score information
1337 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
1338 T = (enum InstCounterType)(T + 1)) {
1339 int PredLB = PredScoreBrackets->getScoreLB(T);
1340 int PredUB = PredScoreBrackets->getScoreUB(T);
1341 if (PredLB < PredUB) {
1342 int PredScale = MaxPending[T] - PredUB;
1343 // Merge vgpr scores.
1344 for (int J = 0; J <= PredScoreBrackets->getMaxVGPR(); J++) {
1345 int PredRegScore = PredScoreBrackets->getRegScore(J, T);
1346 if (PredRegScore <= PredLB)
1348 int NewRegScore = PredScale + PredRegScore;
1349 ScoreBrackets->setRegScore(
1350 J, T, std::max(ScoreBrackets->getRegScore(J, T), NewRegScore));
1352 // Also need to merge sgpr scores for lgkm_cnt.
1353 if (T == LGKM_CNT) {
1354 for (int J = 0; J <= PredScoreBrackets->getMaxSGPR(); J++) {
1356 PredScoreBrackets->getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
1357 if (PredRegScore <= PredLB)
1359 int NewRegScore = PredScale + PredRegScore;
1360 ScoreBrackets->setRegScore(
1361 J + NUM_ALL_VGPRS, LGKM_CNT,
1363 ScoreBrackets->getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT),
1370 // Also merge the WaitEvent information.
1371 ForAllWaitEventType(W) {
1372 enum InstCounterType T = PredScoreBrackets->eventCounter(W);
1373 int PredEventUB = PredScoreBrackets->getEventUB(W);
1374 if (PredEventUB > PredScoreBrackets->getScoreLB(T)) {
1376 MaxPending[T] + PredEventUB - PredScoreBrackets->getScoreUB(T);
1377 if (NewEventUB > 0) {
1378 ScoreBrackets->setEventUB(
1379 W, std::max(ScoreBrackets->getEventUB(W), NewEventUB));
1385 // TODO: Is SC Block->IsMainExit() same as Block.succ_empty()?
1386 // Set the register scoreboard.
1387 if (Block.succ_empty() && !KillWaitBrackets.empty()) {
1388 for (unsigned int I = 0; I < KillWaitBrackets.size(); I++) {
1389 // Now merge the gpr_reg_score information.
1390 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
1391 T = (enum InstCounterType)(T + 1)) {
1392 int PredLB = KillWaitBrackets[I]->getScoreLB(T);
1393 int PredUB = KillWaitBrackets[I]->getScoreUB(T);
1394 if (PredLB < PredUB) {
1395 int PredScale = MaxPending[T] - PredUB;
1396 // Merge vgpr scores.
1397 for (int J = 0; J <= KillWaitBrackets[I]->getMaxVGPR(); J++) {
1398 int PredRegScore = KillWaitBrackets[I]->getRegScore(J, T);
1399 if (PredRegScore <= PredLB)
1401 int NewRegScore = PredScale + PredRegScore;
1402 ScoreBrackets->setRegScore(
1403 J, T, std::max(ScoreBrackets->getRegScore(J, T), NewRegScore));
1405 // Also need to merge sgpr scores for lgkm_cnt.
1406 if (T == LGKM_CNT) {
1407 for (int J = 0; J <= KillWaitBrackets[I]->getMaxSGPR(); J++) {
1409 KillWaitBrackets[I]->getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
1410 if (PredRegScore <= PredLB)
1412 int NewRegScore = PredScale + PredRegScore;
1413 ScoreBrackets->setRegScore(
1414 J + NUM_ALL_VGPRS, LGKM_CNT,
1416 ScoreBrackets->getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT),
1423 // Also merge the WaitEvent information.
1424 ForAllWaitEventType(W) {
1425 enum InstCounterType T = KillWaitBrackets[I]->eventCounter(W);
1426 int PredEventUB = KillWaitBrackets[I]->getEventUB(W);
1427 if (PredEventUB > KillWaitBrackets[I]->getScoreLB(T)) {
1429 MaxPending[T] + PredEventUB - KillWaitBrackets[I]->getScoreUB(T);
1430 if (NewEventUB > 0) {
1431 ScoreBrackets->setEventUB(
1432 W, std::max(ScoreBrackets->getEventUB(W), NewEventUB));
1439 // Special case handling of GDS_GPR_LOCK and EXP_GPR_LOCK. Merge this for the
1440 // sequencing predecessors, because changes to EXEC require waitcnts due to
1441 // the delayed nature of these operations.
1442 for (MachineBasicBlock *Pred : Block.predecessors()) {
1443 if (BlockVisitedSet.find(Pred) == BlockVisitedSet.end()) {
1447 BlockWaitcntBrackets *PredScoreBrackets =
1448 BlockWaitcntBracketsMap[Pred].get();
1450 int pred_gds_ub = PredScoreBrackets->getEventUB(GDS_GPR_LOCK);
1451 if (pred_gds_ub > PredScoreBrackets->getScoreLB(EXP_CNT)) {
1452 int new_gds_ub = MaxPending[EXP_CNT] + pred_gds_ub -
1453 PredScoreBrackets->getScoreUB(EXP_CNT);
1454 if (new_gds_ub > 0) {
1455 ScoreBrackets->setEventUB(
1457 std::max(ScoreBrackets->getEventUB(GDS_GPR_LOCK), new_gds_ub));
1460 int pred_exp_ub = PredScoreBrackets->getEventUB(EXP_GPR_LOCK);
1461 if (pred_exp_ub > PredScoreBrackets->getScoreLB(EXP_CNT)) {
1462 int new_exp_ub = MaxPending[EXP_CNT] + pred_exp_ub -
1463 PredScoreBrackets->getScoreUB(EXP_CNT);
1464 if (new_exp_ub > 0) {
1465 ScoreBrackets->setEventUB(
1467 std::max(ScoreBrackets->getEventUB(EXP_GPR_LOCK), new_exp_ub));
1473 /// Return the "bottom" block of a loop. This differs from
1474 /// MachineLoop::getBottomBlock in that it works even if the loop is
1475 /// discontiguous.
1476 MachineBasicBlock *SIInsertWaitcnts::loopBottom(const MachineLoop *Loop) {
1477 MachineBasicBlock *Bottom = Loop->getHeader();
1478 for (MachineBasicBlock *MBB : Loop->blocks())
1479 if (MBB->getNumber() > Bottom->getNumber())
1484 // Generate s_waitcnt instructions where needed.
1485 void SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
1486 MachineBasicBlock &Block) {
1487 // Initialize the state information.
1488 mergeInputScoreBrackets(Block);
1490 BlockWaitcntBrackets *ScoreBrackets = BlockWaitcntBracketsMap[&Block].get();
1493 dbgs() << "Block" << Block.getNumber();
1494 ScoreBrackets->dump();
1497 bool InsertNOP = false;
1499 // Walk over the instructions.
1500 for (MachineBasicBlock::iterator Iter = Block.begin(), E = Block.end();
1502 MachineInstr &Inst = *Iter;
1503 // Remove any previously existing waitcnts.
1504 if (Inst.getOpcode() == AMDGPU::S_WAITCNT) {
1505 // TODO: Register the old waitcnt and optimize the following waitcnts.
1506 // Leaving the previously existing waitcnts is conservatively correct.
1507 if (CompilerGeneratedWaitcntSet.find(&Inst) ==
1508 CompilerGeneratedWaitcntSet.end())
1511 ScoreBrackets->setWaitcnt(&Inst);
1513 Inst.removeFromParent();
1518 // Kill instructions generate a conditional branch to the endmain block.
1519 // Merge the current waitcnt state into the endmain block information.
1520 // TODO: Are there other flavors of KILL instruction?
1521 if (Inst.getOpcode() == AMDGPU::KILL) {
1522 addKillWaitBracket(ScoreBrackets);
1525 bool VCCZBugWorkAround = false;
1526 if (readsVCCZ(Inst) &&
1527 (VCCZBugHandledSet.find(&Inst) == VCCZBugHandledSet.end())) {
1528 if (ScoreBrackets->getScoreLB(LGKM_CNT) <
1529 ScoreBrackets->getScoreUB(LGKM_CNT) &&
1530 ScoreBrackets->hasPendingSMEM()) {
1531 if (ST->getGeneration() <= SISubtarget::SEA_ISLANDS)
1532 VCCZBugWorkAround = true;
1536 // Generate an s_waitcnt instruction to be placed before
1537 // cur_Inst, if needed.
1538 MachineInstr *SWaitInst = generateSWaitCntInstBefore(Inst, ScoreBrackets);
1541 Block.insert(Inst, SWaitInst);
1542 if (ScoreBrackets->getWaitcnt() != SWaitInst) {
1543 DEBUG(dbgs() << "insertWaitcntInBlock\n"
1544 << "Old Instr: " << Inst << '\n'
1545 << "New Instr: " << *SWaitInst << '\n';);
1549 updateEventWaitCntAfter(Inst, ScoreBrackets);
1551 #if 0 // TODO: implement resource type check controlled by options with ub = LB.
1552 // If this instruction generates a S_SETVSKIP because it is an
1553 // indexed resource, and we are on Tahiti, then it will also force
1554 // an S_WAITCNT vmcnt(0)
1555 if (RequireCheckResourceType(Inst, context)) {
1556 // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
1557 ScoreBrackets->setScoreLB(VM_CNT,
1558 ScoreBrackets->getScoreUB(VM_CNT));
1562 ScoreBrackets->clearWaitcnt();
1565 DEBUG({ SWaitInst->print(dbgs() << '\n'); });
1569 ScoreBrackets->dump();
1572 // Check to see if this is a GWS instruction. If so, and if this is CI or
1573 // VI, then the generated code sequence will include an S_WAITCNT 0.
1574 // TODO: Are these the only GWS instructions?
1575 if (Inst.getOpcode() == AMDGPU::DS_GWS_INIT ||
1576 Inst.getOpcode() == AMDGPU::DS_GWS_SEMA_V ||
1577 Inst.getOpcode() == AMDGPU::DS_GWS_SEMA_BR ||
1578 Inst.getOpcode() == AMDGPU::DS_GWS_SEMA_P ||
1579 Inst.getOpcode() == AMDGPU::DS_GWS_BARRIER) {
1580 // TODO: && context->target_info->GwsRequiresMemViolTest() ) {
1581 ScoreBrackets->updateByWait(VM_CNT, ScoreBrackets->getScoreUB(VM_CNT));
1582 ScoreBrackets->updateByWait(EXP_CNT, ScoreBrackets->getScoreUB(EXP_CNT));
1583 ScoreBrackets->updateByWait(LGKM_CNT,
1584 ScoreBrackets->getScoreUB(LGKM_CNT));
1587 // TODO: Remove this work-around and enable the assert after fixing the scheduler.
1589 if (VCCZBugWorkAround) {
1590 // Restore the vccz bit. Any time a value is written to vcc, the vcc
1591 // bit is updated, so we can restore the bit by reading the value of
1592 // vcc and then writing it back to the register.
1593 BuildMI(Block, Inst, Inst.getDebugLoc(), TII->get(AMDGPU::S_MOV_B64),
1595 .addReg(AMDGPU::VCC);
1596 VCCZBugHandledSet.insert(&Inst);
1599 if (ST->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
1601 // This avoids an s_nop after a waitcnt has just been inserted.
1602 if (!SWaitInst && InsertNOP) {
1603 BuildMI(Block, Inst, DebugLoc(), TII->get(AMDGPU::S_NOP)).addImm(0);
1607 // Any occurrence of consecutive VMEM or SMEM instructions forms a VMEM
1608 // or SMEM clause, respectively.
1610 // The temporary workaround is to break the clauses with S_NOP.
1612 // The proper solution would be to allocate registers such that all source
1613 // and destination registers don't overlap, e.g. this is illegal:
1616 bool IsSMEM = false;
1617 bool IsVMEM = false;
1618 if (TII->isSMRD(Inst))
1620 else if (TII->usesVM_CNT(Inst))
1627 MachineInstr &Next = *Iter;
1629 // TODO: How about consecutive SMEM instructions?
1630 // The comment above says to break the clause, but the code does not.
1631 // if ((TII->isSMRD(next) && isSMEM) ||
1632 if (!IsSMEM && TII->usesVM_CNT(Next) && IsVMEM &&
1633 // TODO: Enable this check when hasSoftClause is upstreamed.
1634 // ST->hasSoftClauses() &&
1635 ST->isXNACKEnabled()) {
1636 // Insert a NOP to break the clause.
1641 // There must be "S_NOP 0" between an instruction writing M0 and
1642 // S_SENDMSG.
1643 if ((Next.getOpcode() == AMDGPU::S_SENDMSG ||
1644 Next.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
1645 Inst.definesRegister(AMDGPU::M0))
1654 // Check if we need to force convergence at loop footer.
1655 MachineLoop *ContainingLoop = MLI->getLoopFor(&Block);
1656 if (ContainingLoop && loopBottom(ContainingLoop) == &Block) {
1657 LoopWaitcntData *WaitcntData = LoopWaitcntDataMap[ContainingLoop].get();
1658 WaitcntData->print();
1659 DEBUG(dbgs() << '\n';);
1661 // The iterative waitcnt insertion algorithm aims for optimal waitcnt
1662 // placement, but doesn't always guarantee convergence for a loop. Each
1663 // loop should take at most 2 iterations to converge naturally.
1664 // When this max is reached and the result doesn't converge, we force
1665 // convergence by inserting an s_waitcnt at the end of the loop footer.
1666 if (WaitcntData->getIterCnt() > 2) {
1667 // To ensure convergence, we need to make the wait events at the loop
1668 // footer be no more than those from the previous iteration.
1669 // As a simplification, instead of tracking individual scores and
1670 // generating the precise wait count, just wait on 0.
1671 bool HasPending = false;
1672 MachineInstr *SWaitInst = WaitcntData->getWaitcnt();
1673 for (enum InstCounterType T = VM_CNT; T < NUM_INST_CNTS;
1674 T = (enum InstCounterType)(T + 1)) {
1675 if (ScoreBrackets->getScoreUB(T) > ScoreBrackets->getScoreLB(T)) {
1676 ScoreBrackets->setScoreLB(T, ScoreBrackets->getScoreUB(T));
1683 SWaitInst = Block.getParent()->CreateMachineInstr(
1684 TII->get(AMDGPU::S_WAITCNT), DebugLoc());
1685 CompilerGeneratedWaitcntSet.insert(SWaitInst);
1686 const MachineOperand &Op = MachineOperand::CreateImm(0);
1687 SWaitInst->addOperand(MF, Op);
1688 #if 0 // TODO: Format the debug output
1689 OutputTransformBanner("insertWaitcntInBlock",0,"Create:",context);
1690 OutputTransformAdd(SWaitInst, context);
1694 _DEV( REPORTED_STATS->force_waitcnt_converge = 1; )
1700 SWaitInst->print(dbgs());
1701 dbgs() << "\nAdjusted score board:";
1702 ScoreBrackets->dump();
1705 // Add this waitcnt to the block. It is either newly created or was
1706 // created in previous iterations and added back, since block traversal
1707 // always removes waitcnts.
1708 insertWaitcntBeforeCF(Block, SWaitInst);
1709 WaitcntData->setWaitcnt(SWaitInst);
1715 bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
1716 ST = &MF.getSubtarget<SISubtarget>();
1717 TII = ST->getInstrInfo();
1718 TRI = &TII->getRegisterInfo();
1719 MRI = &MF.getRegInfo();
1720 MLI = &getAnalysis<MachineLoopInfo>();
1721 IV = AMDGPU::IsaInfo::getIsaVersion(ST->getFeatureBits());
1722 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1723 AMDGPUASI = ST->getAMDGPUAS();
1725 HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
1726 HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
1727 HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
1729 HardwareLimits.NumVGPRsMax = ST->getAddressableNumVGPRs();
1730 HardwareLimits.NumSGPRsMax = ST->getAddressableNumSGPRs();
1731 assert(HardwareLimits.NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
1732 assert(HardwareLimits.NumSGPRsMax <= SQ_MAX_PGM_SGPRS);
1734 RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
1735 RegisterEncoding.VGPRL =
1736 RegisterEncoding.VGPR0 + HardwareLimits.NumVGPRsMax - 1;
1737 RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
1738 RegisterEncoding.SGPRL =
1739 RegisterEncoding.SGPR0 + HardwareLimits.NumSGPRsMax - 1;
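// The closed ranges [VGPR0, VGPRL] and [SGPR0, SGPRL] bound the hardware
// register encodings that getRegInterval maps onto scoring-table indices.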
1741 // Walk over the blocks in reverse post order, inserting
1742 // s_waitcnt where needed.
1743 ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
1744 bool Modified = false;
1745 for (ReversePostOrderTraversal<MachineFunction *>::rpo_iterator
1747 E = RPOT.end(), J = RPOT.begin();
1749 MachineBasicBlock &MBB = **I;
1751 BlockVisitedSet.insert(&MBB);
1753 BlockWaitcntBrackets *ScoreBrackets = BlockWaitcntBracketsMap[&MBB].get();
1754 if (!ScoreBrackets) {
1755 BlockWaitcntBracketsMap[&MBB] = make_unique<BlockWaitcntBrackets>();
1756 ScoreBrackets = BlockWaitcntBracketsMap[&MBB].get();
1758 ScoreBrackets->setPostOrder(MBB.getNumber());
1759 MachineLoop *ContainingLoop = MLI->getLoopFor(&MBB);
1760 if (ContainingLoop && LoopWaitcntDataMap[ContainingLoop] == nullptr)
1761 LoopWaitcntDataMap[ContainingLoop] = make_unique<LoopWaitcntData>();
1763 // If we are walking into the block from before the loop, then guarantee
1764 // at least 1 re-walk over the loop to propagate the information, even if
1765 // no S_WAITCNT instructions were generated.
1766 if (ContainingLoop && ContainingLoop->getHeader() == &MBB && J < I &&
1767 (BlockWaitcntProcessedSet.find(&MBB) ==
1768 BlockWaitcntProcessedSet.end())) {
1769 BlockWaitcntBracketsMap[&MBB]->setRevisitLoop(true);
1770 DEBUG(dbgs() << "set-revisit: block"
1771 << ContainingLoop->getHeader()->getNumber() << '\n';);
1774 // Walk over the instructions.
1775 insertWaitcntInBlock(MF, MBB);
1777 // Flag that waitcnts have been processed at least once.
1778 BlockWaitcntProcessedSet.insert(&MBB);
1780 // See if we want to revisit the loop.
1781 if (ContainingLoop && loopBottom(ContainingLoop) == &MBB) {
1782 MachineBasicBlock *EntryBB = ContainingLoop->getHeader();
1783 BlockWaitcntBrackets *EntrySB = BlockWaitcntBracketsMap[EntryBB].get();
1784 if (EntrySB && EntrySB->getRevisitLoop()) {
1785 EntrySB->setRevisitLoop(false);
1787 int32_t PostOrder = EntrySB->getPostOrder();
1788 // TODO: Avoid this loop. Find another way to set I.
1789 for (ReversePostOrderTraversal<MachineFunction *>::rpo_iterator
1793 MachineBasicBlock &MBBX = **X;
1794 if (MBBX.getNumber() == PostOrder) {
1799 LoopWaitcntData *WaitcntData = LoopWaitcntDataMap[ContainingLoop].get();
1800 WaitcntData->incIterCnt();
1801 DEBUG(dbgs() << "revisit: block" << EntryBB->getNumber() << '\n';);
1804 LoopWaitcntData *WaitcntData = LoopWaitcntDataMap[ContainingLoop].get();
1805 // Loop converged, reset iteration count. If this loop gets revisited,
1806 // it must be from an outer loop; the counter will restart, which
1807 // ensures we don't force convergence on such revisits.
1808 WaitcntData->resetIterCnt();
1816 SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
1818 bool HaveScalarStores = false;
1820 for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
1823 MachineBasicBlock &MBB = *BI;
1825 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
1828 if (!HaveScalarStores && TII->isScalarStore(*I))
1829 HaveScalarStores = true;
1831 if (I->getOpcode() == AMDGPU::S_ENDPGM ||
1832 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
1833 EndPgmBlocks.push_back(&MBB);
1837 if (HaveScalarStores) {
1838 // If scalar writes are used, the cache must be flushed or else the next
1839 // wave to reuse the same scratch memory may see clobbered data.
1841 // Insert s_dcache_wb at wave termination points if there were any scalar
1842 // stores, and only if the cache hasn't already been flushed. This could be
1843 // improved by looking across blocks for flushes in postdominating blocks
1844 // from the stores but an explicitly requested flush is probably very rare.
1845 for (MachineBasicBlock *MBB : EndPgmBlocks) {
1846 bool SeenDCacheWB = false;
1848 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
1851 if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
1852 SeenDCacheWB = true;
1853 else if (TII->isScalarStore(*I))
1854 SeenDCacheWB = false;
1856 // FIXME: It would be better to insert this before a waitcnt if any.
1857 if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
1858 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
1861 BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
1867 if (!MFI->isEntryFunction()) {
1868 // Wait for any outstanding memory operations that the input registers may
1869 // depend on. We can't track them and it's better to do the wait after the
1870 // costly call sequence.
1872 // TODO: Could insert earlier and schedule more liberally with operations
1873 // that only use caller preserved registers.
1874 MachineBasicBlock &EntryBB = MF.front();
1875 BuildMI(EntryBB, EntryBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WAITCNT))
1876 .addImm(0);