1 //===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// Insert wait instructions for memory reads and writes.
12 /// Memory reads and writes are issued asynchronously, so we need to insert
13 /// S_WAITCNT instructions when we want to access any of their results or
14 /// overwrite any register that's used asynchronously.
16 /// TODO: This pass currently keeps one timeline per hardware counter. A more
17 /// finely-grained approach that keeps one timeline per event type could
18 /// sometimes get away with generating weaker s_waitcnt instructions. For
19 /// example, when both SMEM and LDS are in flight and we need to wait for
20 /// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
21 /// but the pass will currently generate a conservative lgkmcnt(0) because
22 /// multiple event types are in flight.
24 //===----------------------------------------------------------------------===//
27 #include "AMDGPUSubtarget.h"
28 #include "SIDefines.h"
29 #include "SIInstrInfo.h"
30 #include "SIMachineFunctionInfo.h"
31 #include "SIRegisterInfo.h"
32 #include "Utils/AMDGPUBaseInfo.h"
33 #include "llvm/ADT/DenseMap.h"
34 #include "llvm/ADT/DenseSet.h"
35 #include "llvm/ADT/MapVector.h"
36 #include "llvm/ADT/PostOrderIterator.h"
37 #include "llvm/ADT/STLExtras.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFunction.h"
41 #include "llvm/CodeGen/MachineFunctionPass.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineMemOperand.h"
45 #include "llvm/CodeGen/MachineOperand.h"
46 #include "llvm/CodeGen/MachinePostDominators.h"
47 #include "llvm/CodeGen/MachineRegisterInfo.h"
48 #include "llvm/InitializePasses.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/Pass.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/DebugCounter.h"
53 #include "llvm/Support/ErrorHandling.h"
54 #include "llvm/Support/raw_ostream.h"
64 #define DEBUG_TYPE "si-insert-waitcnts"
66 DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE"-forceexp",
67 "Force emit s_waitcnt expcnt(0) instrs");
68 DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE"-forcelgkm",
69 "Force emit s_waitcnt lgkmcnt(0) instrs");
70 DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE"-forcevm",
71 "Force emit s_waitcnt vmcnt(0) instrs");
73 static cl::opt<bool> ForceEmitZeroFlag(
74 "amdgpu-waitcnt-forcezero",
75 cl::desc("Force all waitcnt instrs to be emitted as s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
76 cl::init(false), cl::Hidden);
80 template <typename EnumT>
82 : public iterator_facade_base<enum_iterator<EnumT>,
83 std::forward_iterator_tag, const EnumT> {
86 enum_iterator() = default;
87 enum_iterator(EnumT Value) : Value(Value) {}
89 enum_iterator &operator++() {
90 Value = static_cast<EnumT>(Value + 1);
94 bool operator==(const enum_iterator &RHS) const { return Value == RHS.Value; }
96 EnumT operator*() const { return Value; }
99 // Class of object that encapsulates the latest instruction counter score
100 // associated with the operand. Used for determining whether an
101 // s_waitcnt instruction needs to be emitted.
103 #define CNT_MASK(t) (1u << (t))
105 enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };
107 iterator_range<enum_iterator<InstCounterType>> inst_counter_types() {
108 return make_range(enum_iterator<InstCounterType>(VM_CNT),
109 enum_iterator<InstCounterType>(NUM_INST_CNTS));
112 using RegInterval = std::pair<int, int>;
129 VMEM_ACCESS, // vector-memory read & write
130 VMEM_READ_ACCESS, // vector-memory read
131 VMEM_WRITE_ACCESS, // vector-memory write
132 LDS_ACCESS, // lds read & write
133 GDS_ACCESS, // gds read & write
134 SQ_MESSAGE, // send message
135 SMEM_ACCESS, // scalar-memory read & write
136 EXP_GPR_LOCK, // export holding on its data src
137 GDS_GPR_LOCK, // GDS holding on its data and addr src
138 EXP_POS_ACCESS, // write to export position
139 EXP_PARAM_ACCESS, // write to export parameter
140 VMW_GPR_LOCK, // vector-memory write holding on its data src
144 static const unsigned WaitEventMaskForInst[NUM_INST_CNTS] = {
145 (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
146 (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
148 (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
149 (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS),
150 (1 << VMEM_WRITE_ACCESS)
154 // 0 .. SQ_MAX_PGM_VGPRS-1 real VGPRs
155 // SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1 extra VGPR-like slots
156 // NUM_ALL_VGPRS .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
157 // We reserve a fixed number of VGPR slots in the scoring tables for
158 // special tokens like SCMEM_LDS (needed for buffer load to LDS).
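// For example, with this layout v5 occupies slot 5 in the scoring tables, the
// EXTRA_VGPR_LDS token occupies slot 256 (SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS),
// and s3 occupies slot 260 (NUM_ALL_VGPRS + 3).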
159 enum RegisterMapping {
160 SQ_MAX_PGM_VGPRS = 256, // Maximum programmable VGPRs across all targets.
161 SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
162 NUM_EXTRA_VGPRS = 1, // A reserved slot for DS.
163 EXTRA_VGPR_LDS = 0, // This is a placeholder the Shader algorithm uses.
164 NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
167 // Enumerate different types of result-returning VMEM operations. Although
168 // s_waitcnt orders them all with a single vmcnt counter, in the absence of
169 // s_waitcnt only instructions of the same VmemType are guaranteed to write
170 // their results in order -- so there is no need to insert an s_waitcnt between
171 // two instructions of the same type that write the same vgpr.
173 // BUF instructions and MIMG instructions without a sampler.
175 // MIMG instructions with a sampler.
179 VmemType getVmemType(const MachineInstr &Inst) {
180 assert(SIInstrInfo::isVMEM(Inst));
181 if (!SIInstrInfo::isMIMG(Inst))
182 return VMEM_NOSAMPLER;
183 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
184 return AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler
189 void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
192 Wait.VmCnt = std::min(Wait.VmCnt, Count);
195 Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
198 Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
201 Wait.VsCnt = std::min(Wait.VsCnt, Count);
204 llvm_unreachable("bad InstCounterType");
208 // This object maintains the current score brackets of each wait counter, and
209 // a per-register scoreboard for each wait counter.
211 // We also maintain the latest score for every event type that can change the
212 // waitcnt in order to know if there are multiple types of events within
213 // the brackets. When multiple event types happen in the bracket, the wait
214 // count may get decremented out of order, so we need to insert an
215 // "s_waitcnt 0" before use.
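// As a rough illustration: if three VMEM loads have been issued since the
// last wait, the VM_CNT bracket spans scores (LB, LB + 3]. A register whose
// score lies inside that interval still has a pending load, and waiting for
// vmcnt(UB - score) is sufficient to make its result visible.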
216 class WaitcntBrackets {
218 WaitcntBrackets(const GCNSubtarget *SubTarget) : ST(SubTarget) {}
220 static unsigned getWaitCountMax(InstCounterType T) {
223 return HardwareLimits.VmcntMax;
225 return HardwareLimits.LgkmcntMax;
227 return HardwareLimits.ExpcntMax;
229 return HardwareLimits.VscntMax;
236 unsigned getScoreLB(InstCounterType T) const {
237 assert(T < NUM_INST_CNTS);
241 unsigned getScoreUB(InstCounterType T) const {
242 assert(T < NUM_INST_CNTS);
246 // Mapping from event to counter.
247 InstCounterType eventCounter(WaitEventType E) {
248 if (WaitEventMaskForInst[VM_CNT] & (1 << E))
250 if (WaitEventMaskForInst[LGKM_CNT] & (1 << E))
252 if (WaitEventMaskForInst[VS_CNT] & (1 << E))
254 assert(WaitEventMaskForInst[EXP_CNT] & (1 << E));
258 unsigned getRegScore(int GprNo, InstCounterType T) {
259 if (GprNo < NUM_ALL_VGPRS) {
260 return VgprScores[T][GprNo];
262 assert(T == LGKM_CNT);
263 return SgprScores[GprNo - NUM_ALL_VGPRS];
266 bool merge(const WaitcntBrackets &Other);
268 RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
269 const MachineRegisterInfo *MRI,
270 const SIRegisterInfo *TRI, unsigned OpNo) const;
272 bool counterOutOfOrder(InstCounterType T) const;
273 bool simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
274 bool simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
275 void determineWait(InstCounterType T, unsigned ScoreToWait,
276 AMDGPU::Waitcnt &Wait) const;
277 void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
278 void applyWaitcnt(InstCounterType T, unsigned Count);
279 void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
280 const MachineRegisterInfo *MRI, WaitEventType E,
283 bool hasPending() const { return PendingEvents != 0; }
284 bool hasPendingEvent(WaitEventType E) const {
285 return PendingEvents & (1 << E);
288 bool hasMixedPendingEvents(InstCounterType T) const {
289 unsigned Events = PendingEvents & WaitEventMaskForInst[T];
290 // Return true if more than one bit is set in Events.
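// For example, Events = 0b0110 gives 0b0110 & 0b0101 = 0b0100, i.e. non-zero.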
291 return Events & (Events - 1);
294 bool hasPendingFlat() const {
295 return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
296 LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
297 (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
298 LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
301 void setPendingFlat() {
302 LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
303 LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
306 // Return true if there might be pending writes to the specified vgpr by VMEM
307 // instructions with types different from V.
308 bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const {
309 assert(GprNo < NUM_ALL_VGPRS);
310 return VgprVmemTypes[GprNo] & ~(1 << V);
313 void clearVgprVmemTypes(int GprNo) {
314 assert(GprNo < NUM_ALL_VGPRS);
315 VgprVmemTypes[GprNo] = 0;
318 void print(raw_ostream &);
319 void dump() { print(dbgs()); }
328 static bool mergeScore(const MergeInfo &M, unsigned &Score,
329 unsigned OtherScore);
331 void setScoreLB(InstCounterType T, unsigned Val) {
332 assert(T < NUM_INST_CNTS);
336 void setScoreUB(InstCounterType T, unsigned Val) {
337 assert(T < NUM_INST_CNTS);
340 unsigned UB = ScoreUBs[T] - getWaitCountMax(EXP_CNT);
341 if (ScoreLBs[T] < UB && UB < ScoreUBs[T])
346 void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
347 if (GprNo < NUM_ALL_VGPRS) {
348 VgprUB = std::max(VgprUB, GprNo);
349 VgprScores[T][GprNo] = Val;
351 assert(T == LGKM_CNT);
352 SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS);
353 SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
357 void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
358 const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
359 unsigned OpNo, unsigned Val);
361 const GCNSubtarget *ST = nullptr;
362 unsigned ScoreLBs[NUM_INST_CNTS] = {0};
363 unsigned ScoreUBs[NUM_INST_CNTS] = {0};
364 unsigned PendingEvents = 0;
365 // Remember the last flat memory operation.
366 unsigned LastFlat[NUM_INST_CNTS] = {0};
367 // wait_cnt scores for every vgpr.
368 // Keep track of the VgprUB and SgprUB to make merge at join efficient.
371 unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
372 // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
373 unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0};
374 // Bitmask of the VmemTypes of VMEM instructions that might have a pending
375 // write to each vgpr.
376 unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};
379 class SIInsertWaitcnts : public MachineFunctionPass {
381 const GCNSubtarget *ST = nullptr;
382 const SIInstrInfo *TII = nullptr;
383 const SIRegisterInfo *TRI = nullptr;
384 const MachineRegisterInfo *MRI = nullptr;
385 AMDGPU::IsaVersion IV;
387 DenseSet<MachineInstr *> TrackedWaitcntSet;
388 DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
389 MachinePostDominatorTree *PDT;
392 MachineBasicBlock *MBB;
393 std::unique_ptr<WaitcntBrackets> Incoming;
396 explicit BlockInfo(MachineBasicBlock *MBB) : MBB(MBB) {}
399 MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;
401 // ForceEmitZeroWaitcnts: force all waitcnt instructions to be emitted as
402 // s_waitcnt 0 because of the amdgpu-waitcnt-forcezero flag.
403 bool ForceEmitZeroWaitcnts;
404 bool ForceEmitWaitcnt[NUM_INST_CNTS];
409 SIInsertWaitcnts() : MachineFunctionPass(ID) {
410 (void)ForceExpCounter;
411 (void)ForceLgkmCounter;
412 (void)ForceVMCounter;
415 bool runOnMachineFunction(MachineFunction &MF) override;
417 StringRef getPassName() const override {
418 return "SI insert wait instructions";
421 void getAnalysisUsage(AnalysisUsage &AU) const override {
422 AU.setPreservesCFG();
423 AU.addRequired<MachinePostDominatorTree>();
424 MachineFunctionPass::getAnalysisUsage(AU);
427 bool isForceEmitWaitcnt() const {
428 for (auto T : inst_counter_types())
429 if (ForceEmitWaitcnt[T])
434 void setForceEmitWaitcnt() {
435 // For non-debug builds, ForceEmitWaitcnt has been initialized to false.
436 // For debug builds, get the debug counter info and adjust if need be.
438 if (DebugCounter::isCounterSet(ForceExpCounter) &&
439 DebugCounter::shouldExecute(ForceExpCounter)) {
440 ForceEmitWaitcnt[EXP_CNT] = true;
442 ForceEmitWaitcnt[EXP_CNT] = false;
445 if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
446 DebugCounter::shouldExecute(ForceLgkmCounter)) {
447 ForceEmitWaitcnt[LGKM_CNT] = true;
449 ForceEmitWaitcnt[LGKM_CNT] = false;
452 if (DebugCounter::isCounterSet(ForceVMCounter) &&
453 DebugCounter::shouldExecute(ForceVMCounter)) {
454 ForceEmitWaitcnt[VM_CNT] = true;
456 ForceEmitWaitcnt[VM_CNT] = false;
461 bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
462 bool generateWaitcntInstBefore(MachineInstr &MI,
463 WaitcntBrackets &ScoreBrackets,
464 MachineInstr *OldWaitcntInstr);
465 void updateEventWaitcntAfter(MachineInstr &Inst,
466 WaitcntBrackets *ScoreBrackets);
467 bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
468 WaitcntBrackets &ScoreBrackets);
471 } // end anonymous namespace
473 RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
474 const SIInstrInfo *TII,
475 const MachineRegisterInfo *MRI,
476 const SIRegisterInfo *TRI,
477 unsigned OpNo) const {
478 const MachineOperand &Op = MI->getOperand(OpNo);
480 if (!TRI->isInAllocatableClass(Op.getReg()) || TRI->isAGPR(*MRI, Op.getReg()))
483 // A use via a partial-write (PW) operand does not need a waitcnt.
484 // A partial write does not constitute a WAW hazard.
485 assert(!Op.getSubReg() || !Op.isUndef());
489 unsigned Reg = TRI->getEncodingValue(Op.getReg());
491 if (TRI->isVGPR(*MRI, Op.getReg())) {
492 assert(Reg >= RegisterEncoding.VGPR0 && Reg <= RegisterEncoding.VGPRL);
493 Result.first = Reg - RegisterEncoding.VGPR0;
494 assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
495 } else if (TRI->isSGPRReg(*MRI, Op.getReg())) {
496 assert(Reg >= RegisterEncoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
497 Result.first = Reg - RegisterEncoding.SGPR0 + NUM_ALL_VGPRS;
498 assert(Result.first >= NUM_ALL_VGPRS &&
499 Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
502 // else if (TRI->isTTMP(*MRI, Reg.getReg())) ...
506 const TargetRegisterClass *RC = TII->getOpRegClass(*MI, OpNo);
507 unsigned Size = TRI->getRegSizeInBits(*RC);
508 Result.second = Result.first + ((Size + 16) / 32);
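// For example, a 64-bit register tuple starting at v4 yields the half-open
// interval [4, 6), while a 16-bit register still occupies a single slot
// because (16 + 16) / 32 == 1.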
513 void WaitcntBrackets::setExpScore(const MachineInstr *MI,
514 const SIInstrInfo *TII,
515 const SIRegisterInfo *TRI,
516 const MachineRegisterInfo *MRI, unsigned OpNo,
518 RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo);
519 assert(TRI->isVGPR(*MRI, MI->getOperand(OpNo).getReg()));
520 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
521 setRegScore(RegNo, EXP_CNT, Val);
525 void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
526 const SIRegisterInfo *TRI,
527 const MachineRegisterInfo *MRI,
528 WaitEventType E, MachineInstr &Inst) {
529 InstCounterType T = eventCounter(E);
530 unsigned CurrScore = getScoreUB(T) + 1;
532 report_fatal_error("InsertWaitcnt score wraparound");
533 // PendingEvents and ScoreUB need to be updated regardless of whether this
534 // event changes the score of a register or not.
535 // Examples include vm_cnt for a buffer store or lgkm_cnt for a send-message.
536 PendingEvents |= 1 << E;
537 setScoreUB(T, CurrScore);
540 // Put score on the source vgprs. If this is a store, just use those
541 // specific register(s).
542 if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
544 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
545 // All GDS operations must protect their address register (same as
547 if (AddrOpIdx != -1) {
548 setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
551 if (Inst.mayStore()) {
552 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
553 AMDGPU::OpName::data0) != -1) {
555 &Inst, TII, TRI, MRI,
556 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0),
559 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
560 AMDGPU::OpName::data1) != -1) {
561 setExpScore(&Inst, TII, TRI, MRI,
562 AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
563 AMDGPU::OpName::data1),
566 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1 &&
567 Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
568 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
569 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
570 Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
571 Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
572 Inst.getOpcode() != AMDGPU::DS_APPEND &&
573 Inst.getOpcode() != AMDGPU::DS_CONSUME &&
574 Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
575 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
576 const MachineOperand &Op = Inst.getOperand(I);
577 if (Op.isReg() && !Op.isDef() && TRI->isVGPR(*MRI, Op.getReg())) {
578 setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
582 } else if (TII->isFLAT(Inst)) {
583 if (Inst.mayStore()) {
585 &Inst, TII, TRI, MRI,
586 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
588 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
590 &Inst, TII, TRI, MRI,
591 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
594 } else if (TII->isMIMG(Inst)) {
595 if (Inst.mayStore()) {
596 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
597 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
599 &Inst, TII, TRI, MRI,
600 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
603 } else if (TII->isMTBUF(Inst)) {
604 if (Inst.mayStore()) {
605 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
607 } else if (TII->isMUBUF(Inst)) {
608 if (Inst.mayStore()) {
609 setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
610 } else if (AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1) {
612 &Inst, TII, TRI, MRI,
613 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
617 if (TII->isEXP(Inst)) {
618 // For export, the destination registers are really temps that
619 // can be used as the actual source after export patching, so
620 // we need to treat them like sources and set the EXP_CNT score on them.
622 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
623 MachineOperand &DefMO = Inst.getOperand(I);
624 if (DefMO.isReg() && DefMO.isDef() &&
625 TRI->isVGPR(*MRI, DefMO.getReg())) {
626 setRegScore(TRI->getEncodingValue(DefMO.getReg()), EXP_CNT,
631 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
632 MachineOperand &MO = Inst.getOperand(I);
633 if (MO.isReg() && !MO.isDef() && TRI->isVGPR(*MRI, MO.getReg())) {
634 setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
638 #if 0 // TODO: check if this is handled by MUBUF code above.
639 } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
640 Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
641 Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
642 MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
643 unsigned OpNo;//TODO: find the OpNo for this operand;
644 RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo);
645 for (int RegNo = Interval.first; RegNo < Interval.second;
647 setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
651 // Match the score to the destination registers.
652 for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
653 auto &Op = Inst.getOperand(I);
654 if (!Op.isReg() || !Op.isDef())
656 RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I);
658 if (Interval.first >= NUM_ALL_VGPRS)
660 if (SIInstrInfo::isVMEM(Inst)) {
661 VmemType V = getVmemType(Inst);
662 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo)
663 VgprVmemTypes[RegNo] |= 1 << V;
666 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
667 setRegScore(RegNo, T, CurrScore);
670 if (TII->isDS(Inst) && Inst.mayStore()) {
671 setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
676 void WaitcntBrackets::print(raw_ostream &OS) {
678 for (auto T : inst_counter_types()) {
679 unsigned LB = getScoreLB(T);
680 unsigned UB = getScoreUB(T);
684 OS << " VM_CNT(" << UB - LB << "): ";
687 OS << " LGKM_CNT(" << UB - LB << "): ";
690 OS << " EXP_CNT(" << UB - LB << "): ";
693 OS << " VS_CNT(" << UB - LB << "): ";
696 OS << " UNKNOWN(" << UB - LB << "): ";
701 // Print vgpr scores.
702 for (int J = 0; J <= VgprUB; J++) {
703 unsigned RegScore = getRegScore(J, T);
706 unsigned RelScore = RegScore - LB - 1;
707 if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
708 OS << RelScore << ":v" << J << " ";
710 OS << RelScore << ":ds ";
713 // Also need to print sgpr scores for lgkm_cnt.
715 for (int J = 0; J <= SgprUB; J++) {
716 unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
719 unsigned RelScore = RegScore - LB - 1;
720 OS << RelScore << ":s" << J << " ";
729 /// Simplify the waitcnt, in the sense of removing redundant counts, and return
730 /// whether a waitcnt instruction is needed at all.
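/// For example, with a bracket of LB = 3 and UB = 10, a requested vmcnt(8) is
/// redundant (UB - 8 <= LB, so everything it would wait for has already
/// completed), whereas vmcnt(5) is kept because UB - 5 > LB.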
731 bool WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
732 return simplifyWaitcnt(VM_CNT, Wait.VmCnt) |
733 simplifyWaitcnt(EXP_CNT, Wait.ExpCnt) |
734 simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt) |
735 simplifyWaitcnt(VS_CNT, Wait.VsCnt);
738 bool WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
739 unsigned &Count) const {
740 const unsigned LB = getScoreLB(T);
741 const unsigned UB = getScoreUB(T);
742 if (Count < UB && UB - Count > LB)
749 void WaitcntBrackets::determineWait(InstCounterType T, unsigned ScoreToWait,
750 AMDGPU::Waitcnt &Wait) const {
751 // If the score of src_operand falls within the bracket, we need an
752 // s_waitcnt instruction.
753 const unsigned LB = getScoreLB(T);
754 const unsigned UB = getScoreUB(T);
755 if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
756 if ((T == VM_CNT || T == LGKM_CNT) &&
758 !ST->hasFlatLgkmVMemCountInOrder()) {
759 // If there is a pending FLAT operation, and this is a VMem or LGKM
760 // waitcnt and the target can report early completion, then we need
761 // to force a waitcnt 0.
763 } else if (counterOutOfOrder(T)) {
764 // The counter can get decremented out of order when there are multiple
765 // event types in the bracket. Also emit an s_waitcnt with a conservative
766 // value of 0 for this counter.
769 // If a counter has been maxed out, avoid overflow by waiting for
770 // MAX(CounterType) - 1 instead.
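// For example, with UB = 12 and a producing instruction that scored 9,
// waiting for vmcnt(3) is enough for that result to be visible; the clamp
// below only applies when UB - ScoreToWait exceeds what the counter field
// can encode.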
771 unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
772 addWait(Wait, T, NeededWait);
777 void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
778 applyWaitcnt(VM_CNT, Wait.VmCnt);
779 applyWaitcnt(EXP_CNT, Wait.ExpCnt);
780 applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
781 applyWaitcnt(VS_CNT, Wait.VsCnt);
784 void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
785 const unsigned UB = getScoreUB(T);
789 if (counterOutOfOrder(T))
791 setScoreLB(T, std::max(getScoreLB(T), UB - Count));
794 PendingEvents &= ~WaitEventMaskForInst[T];
798 // Where there are multiple event types in the bracket of a counter, the
799 // decrement may happen out of order.
800 bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
801 // A scalar memory read can always complete out of order.
802 if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
804 return hasMixedPendingEvents(T);
807 INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
809 INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
810 INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
813 char SIInsertWaitcnts::ID = 0;
815 char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;
817 FunctionPass *llvm::createSIInsertWaitcntsPass() {
818 return new SIInsertWaitcnts();
821 static bool readsVCCZ(const MachineInstr &MI) {
822 unsigned Opc = MI.getOpcode();
823 return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
824 !MI.getOperand(1).isUndef();
827 /// \returns true if the callee inserts an s_waitcnt 0 on function entry.
828 static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
829 // Currently all conventions wait, but this may not always be the case.
831 // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
832 // sense to omit the wait and do it in the caller.
836 /// \returns true if the callee is expected to wait for any outstanding waits
837 /// before returning.
838 static bool callWaitsOnFunctionReturn(const MachineInstr &MI) {
842 /// Generate an s_waitcnt instruction to be placed before cur_Inst.
843 /// Instructions of a given type return their results in order,
844 /// but instructions of different types can complete out of order.
845 /// We rely on this in-order completion
846 /// and simply assign a score to each memory access instruction.
847 /// We keep track of the active "score bracket" to determine
848 /// whether a memory access requires an s_waitcnt
849 /// and, if so, what the value of each counter should be.
850 /// The "score bracket" is bounded by the lower-bound and upper-bound
851 /// scores (*_score_LB and *_score_ub respectively).
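/// As a rough example: a buffer load that writes v0 bumps the VM_CNT upper
/// bound and records that score on v0; when a later instruction reads v0,
/// the difference between the current upper bound and v0's score becomes the
/// vmcnt() operand of the s_waitcnt inserted before it (unless the wait is
/// simplified away as redundant).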
852 bool SIInsertWaitcnts::generateWaitcntInstBefore(
853 MachineInstr &MI, WaitcntBrackets &ScoreBrackets,
854 MachineInstr *OldWaitcntInstr) {
855 setForceEmitWaitcnt();
856 bool IsForceEmitWaitcnt = isForceEmitWaitcnt();
858 if (MI.isDebugInstr())
861 AMDGPU::Waitcnt Wait;
863 // See if this instruction has a forced S_WAITCNT VM.
864 // TODO: Handle other cases of NeedsWaitcntVmBefore()
865 if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
866 MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
867 MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
868 MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
869 MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
873 // All waits must be resolved at call return.
874 // NOTE: this could be improved with knowledge of all call sites or
875 // with knowledge of the called routines.
876 if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
877 MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
878 (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
879 Wait = Wait.combined(AMDGPU::Waitcnt::allZero(IV));
881 // Resolve vm waits before gs-done.
882 else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
883 MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
884 ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_) ==
885 AMDGPU::SendMsg::ID_GS_DONE)) {
888 #if 0 // TODO: the following blocks of logic when we have fence.
889 else if (MI.getOpcode() == SC_FENCE) {
890 const unsigned int group_size =
891 context->shader_info->GetMaxThreadGroupSize();
892 // group_size == 0 means thread group size is unknown at compile time
893 const bool group_is_multi_wave =
894 (group_size == 0 || group_size > target_info->GetWaveFrontSize());
895 const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();
897 for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
898 SCRegType src_type = Inst->GetSrcType(i);
901 if (group_is_multi_wave ||
902 context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
903 EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
904 ScoreBrackets->getScoreUB(LGKM_CNT));
905 // LDS may have to wait for VM_CNT after buffer load to LDS
906 if (target_info->HasBufferLoadToLDS()) {
907 EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
908 ScoreBrackets->getScoreUB(VM_CNT));
914 if (group_is_multi_wave || fence_is_global) {
915 EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
916 ScoreBrackets->getScoreUB(EXP_CNT));
917 EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
918 ScoreBrackets->getScoreUB(LGKM_CNT));
926 if (group_is_multi_wave || fence_is_global) {
927 EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
928 ScoreBrackets->getScoreUB(EXP_CNT));
929 EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
930 ScoreBrackets->getScoreUB(VM_CNT));
942 // Export & GDS instructions do not read the EXEC mask until after the export
943 // is granted (which can occur well after the instruction is issued).
944 // The shader program must flush all EXP operations on the export-count
945 // before overwriting the EXEC mask.
947 if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
948 // Export and GDS are tracked individually; either may trigger a waitcnt
950 if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
951 ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
952 ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
953 ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
958 if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
959 // The function is going to insert a wait on everything in its prolog.
960 // We still need to be careful if the call target is a load (e.g. a GOT
961 // load). We also need to check the WAW dependency with the saved PC.
962 Wait = AMDGPU::Waitcnt();
965 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
966 RegInterval CallAddrOpInterval =
967 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, CallAddrOpIdx);
969 for (int RegNo = CallAddrOpInterval.first;
970 RegNo < CallAddrOpInterval.second; ++RegNo)
971 ScoreBrackets.determineWait(
972 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
975 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
976 if (RtnAddrOpIdx != -1) {
977 RegInterval RtnAddrOpInterval =
978 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, RtnAddrOpIdx);
980 for (int RegNo = RtnAddrOpInterval.first;
981 RegNo < RtnAddrOpInterval.second; ++RegNo)
982 ScoreBrackets.determineWait(
983 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
987 // FIXME: Should not be relying on memoperands.
988 // Look at the source operands of every instruction to see if
989 // any of them results from a previous memory operation that affects
990 // its current usage. If so, an s_waitcnt instruction needs to be
992 // If the source operand was defined by a load, add the s_waitcnt
995 // Two cases are handled for destination operands:
996 // 1) If the destination operand was defined by a load, add the s_waitcnt
997 // instruction to guarantee the right WAW order.
998 // 2) If a destination operand was used by a recent export/store instruction,
999 // add an s_waitcnt on exp_cnt to guarantee the WAR order.
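// For example, on subtargets where vmemWriteNeedsExpWaitcnt() holds, a buffer
// store reading v2 records an EXP_CNT score on v2 (VMW_GPR_LOCK), so a later
// instruction that overwrites v2 gets an s_waitcnt on exp_cnt to preserve
// that WAR order.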
1000 for (const MachineMemOperand *Memop : MI.memoperands()) {
1001 const Value *Ptr = Memop->getValue();
1002 if (Memop->isStore() && SLoadAddresses.count(Ptr)) {
1003 addWait(Wait, LGKM_CNT, 0);
1004 if (PDT->dominates(MI.getParent(), SLoadAddresses.find(Ptr)->second))
1005 SLoadAddresses.erase(Ptr);
1007 unsigned AS = Memop->getAddrSpace();
1008 if (AS != AMDGPUAS::LOCAL_ADDRESS)
1010 unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
1011 // VM_CNT is only relevant to vgpr or LDS.
1012 ScoreBrackets.determineWait(
1013 VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
1014 if (Memop->isStore()) {
1015 ScoreBrackets.determineWait(
1016 EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
1020 // Loop over use and def operands.
1021 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
1022 MachineOperand &Op = MI.getOperand(I);
1025 RegInterval Interval =
1026 ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I);
1027 for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
1028 if (TRI->isVGPR(*MRI, Op.getReg())) {
1029 // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
1030 // previous write and this write are the same type of VMEM
1031 // instruction, in which case they're guaranteed to write their
1032 // results in order anyway.
1033 if (Op.isUse() || !SIInstrInfo::isVMEM(MI) ||
1034 ScoreBrackets.hasOtherPendingVmemTypes(RegNo,
1036 ScoreBrackets.determineWait(
1037 VM_CNT, ScoreBrackets.getRegScore(RegNo, VM_CNT), Wait);
1038 ScoreBrackets.clearVgprVmemTypes(RegNo);
1041 ScoreBrackets.determineWait(
1042 EXP_CNT, ScoreBrackets.getRegScore(RegNo, EXP_CNT), Wait);
1045 ScoreBrackets.determineWait(
1046 LGKM_CNT, ScoreBrackets.getRegScore(RegNo, LGKM_CNT), Wait);
1052 // Check to see if this is an S_BARRIER, and if an implicit S_WAITCNT 0
1053 // occurs before the instruction. Doing it here prevents any additional
1054 // S_WAITCNTs from being emitted if the instruction was marked as
1055 // requiring a WAITCNT beforehand.
1056 if (MI.getOpcode() == AMDGPU::S_BARRIER &&
1057 !ST->hasAutoWaitcntBeforeBarrier()) {
1058 Wait = Wait.combined(AMDGPU::Waitcnt::allZero(IV));
1061 // TODO: Remove this work-around, enable the assert for Bug 457939
1062 // after fixing the scheduler. Also, the Shader Compiler code is
1063 // independent of target.
1064 if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
1065 if (ScoreBrackets.getScoreLB(LGKM_CNT) <
1066 ScoreBrackets.getScoreUB(LGKM_CNT) &&
1067 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
1072 // Early-out if no wait is indicated.
1073 if (!ScoreBrackets.simplifyWaitcnt(Wait) && !IsForceEmitWaitcnt) {
1074 bool Modified = false;
1075 if (OldWaitcntInstr) {
1076 for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
1077 &*II != &MI; II = NextI, ++NextI) {
1078 if (II->isDebugInstr())
1081 if (TrackedWaitcntSet.count(&*II)) {
1082 TrackedWaitcntSet.erase(&*II);
1083 II->eraseFromParent();
1085 } else if (II->getOpcode() == AMDGPU::S_WAITCNT) {
1086 int64_t Imm = II->getOperand(0).getImm();
1087 ScoreBrackets.applyWaitcnt(AMDGPU::decodeWaitcnt(IV, Imm));
1089 assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
1090 assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
1091 ScoreBrackets.applyWaitcnt(
1092 AMDGPU::Waitcnt(~0u, ~0u, ~0u, II->getOperand(1).getImm()));
1099 if (ForceEmitZeroWaitcnts)
1100 Wait = AMDGPU::Waitcnt::allZero(IV);
1102 if (ForceEmitWaitcnt[VM_CNT])
1104 if (ForceEmitWaitcnt[EXP_CNT])
1106 if (ForceEmitWaitcnt[LGKM_CNT])
1108 if (ForceEmitWaitcnt[VS_CNT])
1111 ScoreBrackets.applyWaitcnt(Wait);
1113 AMDGPU::Waitcnt OldWait;
1114 bool Modified = false;
1116 if (OldWaitcntInstr) {
1117 for (auto II = OldWaitcntInstr->getIterator(), NextI = std::next(II);
1118 &*II != &MI; II = NextI, NextI++) {
1119 if (II->isDebugInstr())
1122 if (II->getOpcode() == AMDGPU::S_WAITCNT) {
1123 unsigned IEnc = II->getOperand(0).getImm();
1124 AMDGPU::Waitcnt IWait = AMDGPU::decodeWaitcnt(IV, IEnc);
1125 OldWait = OldWait.combined(IWait);
1126 if (!TrackedWaitcntSet.count(&*II))
1127 Wait = Wait.combined(IWait);
1128 unsigned NewEnc = AMDGPU::encodeWaitcnt(IV, Wait);
1129 if (IEnc != NewEnc) {
1130 II->getOperand(0).setImm(NewEnc);
1137 assert(II->getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
1138 assert(II->getOperand(0).getReg() == AMDGPU::SGPR_NULL);
1140 unsigned ICnt = II->getOperand(1).getImm();
1141 OldWait.VsCnt = std::min(OldWait.VsCnt, ICnt);
1142 if (!TrackedWaitcntSet.count(&*II))
1143 Wait.VsCnt = std::min(Wait.VsCnt, ICnt);
1144 if (Wait.VsCnt != ICnt) {
1145 II->getOperand(1).setImm(Wait.VsCnt);
1151 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
1152 << "Old Instr: " << MI
1153 << "New Instr: " << *II << '\n');
1155 if (!Wait.hasWait())
1160 if (Wait.VmCnt != ~0u || Wait.LgkmCnt != ~0u || Wait.ExpCnt != ~0u) {
1161 unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
1162 auto SWaitInst = BuildMI(*MI.getParent(), MI.getIterator(),
1163 MI.getDebugLoc(), TII->get(AMDGPU::S_WAITCNT))
1165 TrackedWaitcntSet.insert(SWaitInst);
1168 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
1169 << "Old Instr: " << MI
1170 << "New Instr: " << *SWaitInst << '\n');
1173 if (Wait.VsCnt != ~0u) {
1174 assert(ST->hasVscnt());
1177 BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
1178 TII->get(AMDGPU::S_WAITCNT_VSCNT))
1179 .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
1180 .addImm(Wait.VsCnt);
1181 TrackedWaitcntSet.insert(SWaitInst);
1184 LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
1185 << "Old Instr: " << MI
1186 << "New Instr: " << *SWaitInst << '\n');
1192 // This is a flat memory operation. Check to see if it has memory
1193 // tokens for both LDS and memory, and if so mark it as a flat access.
1194 bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
1195 if (MI.memoperands_empty())
1198 for (const MachineMemOperand *Memop : MI.memoperands()) {
1199 unsigned AS = Memop->getAddrSpace();
1200 if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
1207 void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
1208 WaitcntBrackets *ScoreBrackets) {
1209 // Now look at the instruction opcode. If it is a memory access
1210 // instruction, update the upper-bound of the appropriate counter's
1211 // bracket and the destination operand scores.
1212 // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
1213 if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
1214 if (TII->isAlwaysGDS(Inst.getOpcode()) ||
1215 TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
1216 ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
1217 ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
1219 ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1221 } else if (TII->isFLAT(Inst)) {
1222 assert(Inst.mayLoadOrStore());
1224 if (TII->usesVM_CNT(Inst)) {
1225 if (!ST->hasVscnt())
1226 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
1227 else if (Inst.mayLoad() &&
1228 AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1)
1229 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
1231 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
1234 if (TII->usesLGKM_CNT(Inst)) {
1235 ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1237 // This is a flat memory operation, so note it - it will require
1238 // that both the VM and LGKM be flushed to zero if it is pending when
1239 // a VM or LGKM dependency occurs.
1240 if (mayAccessLDSThroughFlat(Inst))
1241 ScoreBrackets->setPendingFlat();
1243 } else if (SIInstrInfo::isVMEM(Inst) &&
1244 // TODO: get a better carve out.
1245 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1 &&
1246 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_SC &&
1247 Inst.getOpcode() != AMDGPU::BUFFER_WBINVL1_VOL &&
1248 Inst.getOpcode() != AMDGPU::BUFFER_GL0_INV &&
1249 Inst.getOpcode() != AMDGPU::BUFFER_GL1_INV) {
1250 if (!ST->hasVscnt())
1251 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_ACCESS, Inst);
1252 else if ((Inst.mayLoad() &&
1253 AMDGPU::getAtomicRetOp(Inst.getOpcode()) == -1) ||
1254 /* IMAGE_GET_RESINFO / IMAGE_GET_LOD */
1255 (TII->isMIMG(Inst) && !Inst.mayLoad() && !Inst.mayStore()))
1256 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_READ_ACCESS, Inst);
1257 else if (Inst.mayStore())
1258 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMEM_WRITE_ACCESS, Inst);
1260 if (ST->vmemWriteNeedsExpWaitcnt() &&
1261 (Inst.mayStore() || AMDGPU::getAtomicNoRetOp(Inst.getOpcode()) != -1)) {
1262 ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
1264 } else if (TII->isSMRD(Inst)) {
1265 ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1266 } else if (Inst.isCall()) {
1267 if (callWaitsOnFunctionReturn(Inst)) {
1268 // Act as a wait on everything
1269 ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZero(IV));
1271 // May need to wait for anything.
1272 ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
1275 switch (Inst.getOpcode()) {
1276 case AMDGPU::S_SENDMSG:
1277 case AMDGPU::S_SENDMSGHALT:
1278 ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
1281 case AMDGPU::EXP_DONE: {
1282 int Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
1283 if (Imm >= 32 && Imm <= 63)
1284 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
1285 else if (Imm >= 12 && Imm <= 15)
1286 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
1288 ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
1291 case AMDGPU::S_MEMTIME:
1292 case AMDGPU::S_MEMREALTIME:
1293 ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1301 bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
1302 unsigned OtherScore) {
1303 unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
1304 unsigned OtherShifted =
1305 OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
1306 Score = std::max(MyShifted, OtherShifted);
1307 return OtherShifted > MyShifted;
1310 /// Merge the pending events and associated score brackets of \p Other into
1311 /// this bracket's status.
1313 /// Returns whether the merge resulted in a change that requires tighter waits
1314 /// (i.e. the merged brackets strictly dominate the original brackets).
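/// Score rebasing example: if this bracket spans [LB = 2, UB = 5] and Other
/// spans [LB = 0, UB = 4], the merged bracket keeps LB = 2 and extends UB to
/// 2 + max(3, 4) = 6; a score of 4 from this bracket is shifted to 5 and a
/// score of 3 from Other is shifted to 5, so each score keeps its distance
/// from its own upper bound.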
1315 bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
1316 bool StrictDom = false;
1318 VgprUB = std::max(VgprUB, Other.VgprUB);
1319 SgprUB = std::max(SgprUB, Other.SgprUB);
1321 for (auto T : inst_counter_types()) {
1322 // Merge event flags for this counter
1323 const bool OldOutOfOrder = counterOutOfOrder(T);
1324 const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
1325 const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
1326 if (OtherEvents & ~OldEvents)
1328 PendingEvents |= OtherEvents;
1330 // Merge scores for this counter
1331 const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
1332 const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
1333 const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
1334 if (NewUB < ScoreLBs[T])
1335 report_fatal_error("waitcnt score overflow");
1338 M.OldLB = ScoreLBs[T];
1339 M.OtherLB = Other.ScoreLBs[T];
1340 M.MyShift = NewUB - ScoreUBs[T];
1341 M.OtherShift = NewUB - Other.ScoreUBs[T];
1343 ScoreUBs[T] = NewUB;
1345 StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);
1347 bool RegStrictDom = false;
1348 for (int J = 0; J <= VgprUB; J++) {
1349 RegStrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
1353 for (int J = 0; J <= VgprUB; J++) {
1354 unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
1355 RegStrictDom |= NewVmemTypes != VgprVmemTypes[J];
1356 VgprVmemTypes[J] = NewVmemTypes;
1360 if (T == LGKM_CNT) {
1361 for (int J = 0; J <= SgprUB; J++) {
1362 RegStrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
1366 if (RegStrictDom && !OldOutOfOrder)
1373 // Generate s_waitcnt instructions where needed.
1374 bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
1375 MachineBasicBlock &Block,
1376 WaitcntBrackets &ScoreBrackets) {
1377 bool Modified = false;
1380 dbgs() << "*** Block" << Block.getNumber() << " ***";
1381 ScoreBrackets.dump();
1384 // Assume VCCZ is correct at basic block boundaries, unless and until we need
1385 // to handle cases where that is not true.
1386 bool VCCZCorrect = true;
1388 // Walk over the instructions.
1389 MachineInstr *OldWaitcntInstr = nullptr;
1391 for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
1392 E = Block.instr_end();
1394 MachineInstr &Inst = *Iter;
1396 // Track pre-existing waitcnts from earlier iterations.
1397 if (Inst.getOpcode() == AMDGPU::S_WAITCNT ||
1398 (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
1399 Inst.getOperand(0).isReg() &&
1400 Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL)) {
1401 if (!OldWaitcntInstr)
1402 OldWaitcntInstr = &Inst;
1407 // We might need to restore vccz to its correct value for either of two
1408 // different reasons; see ST->hasReadVCCZBug() and
1409 // ST->partialVCCWritesUpdateVCCZ().
1410 bool RestoreVCCZ = false;
1411 if (readsVCCZ(Inst)) {
1414 else if (ST->hasReadVCCZBug()) {
1415 // There is a hardware bug on CI/SI where an SMRD instruction may corrupt
1416 // the vccz bit, so when we detect that an instruction may read from a
1417 // corrupt vccz bit, we need to:
1418 // 1. Insert s_waitcnt lgkm(0) to wait for all outstanding SMRD
1419 // operations to complete.
1420 // 2. Restore the correct value of vccz by writing the current value
1421 // of vcc back to vcc.
1422 if (ScoreBrackets.getScoreLB(LGKM_CNT) <
1423 ScoreBrackets.getScoreUB(LGKM_CNT) &&
1424 ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
1430 if (TII->isSMRD(Inst)) {
1431 for (const MachineMemOperand *Memop : Inst.memoperands()) {
1432 const Value *Ptr = Memop->getValue();
1433 SLoadAddresses.insert(std::make_pair(Ptr, Inst.getParent()));
1437 if (!ST->partialVCCWritesUpdateVCCZ()) {
1438 // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
1439 // Writes to vcc will fix it.
1440 if (Inst.definesRegister(AMDGPU::VCC_LO) ||
1441 Inst.definesRegister(AMDGPU::VCC_HI))
1442 VCCZCorrect = false;
1443 else if (Inst.definesRegister(AMDGPU::VCC))
1447 // Generate an s_waitcnt instruction to be placed before
1448 // cur_Inst, if needed.
1449 Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr);
1450 OldWaitcntInstr = nullptr;
1452 updateEventWaitcntAfter(Inst, &ScoreBrackets);
1454 #if 0 // TODO: implement resource type check controlled by options with ub = LB.
1455 // If this instruction generates a S_SETVSKIP because it is an
1456 // indexed resource, and we are on Tahiti, then it will also force
1457 // an S_WAITCNT vmcnt(0)
1458 if (RequireCheckResourceType(Inst, context)) {
1459 // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
1460 ScoreBrackets->setScoreLB(VM_CNT,
1461 ScoreBrackets->getScoreUB(VM_CNT));
1467 ScoreBrackets.dump();
1470 // TODO: Remove this work-around after fixing the scheduler and enable the
1473 // Restore the vccz bit. Any time a value is written to vcc, the vccz
1474 // bit is updated, so we can restore the bit by reading the value of
1475 // vcc and then writing it back to the register.
1476 BuildMI(Block, Inst, Inst.getDebugLoc(),
1477 TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
1479 .addReg(TRI->getVCC());
1490 bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
1491 ST = &MF.getSubtarget<GCNSubtarget>();
1492 TII = ST->getInstrInfo();
1493 TRI = &TII->getRegisterInfo();
1494 MRI = &MF.getRegInfo();
1495 IV = AMDGPU::getIsaVersion(ST->getCPU());
1496 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1497 PDT = &getAnalysis<MachinePostDominatorTree>();
1499 ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
1500 for (auto T : inst_counter_types())
1501 ForceEmitWaitcnt[T] = false;
1503 HardwareLimits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
1504 HardwareLimits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
1505 HardwareLimits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
1506 HardwareLimits.VscntMax = ST->hasVscnt() ? 63 : 0;
1508 unsigned NumVGPRsMax = ST->getAddressableNumVGPRs();
1509 unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
1510 assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
1511 assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);
1513 RegisterEncoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
1514 RegisterEncoding.VGPRL = RegisterEncoding.VGPR0 + NumVGPRsMax - 1;
1515 RegisterEncoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
1516 RegisterEncoding.SGPRL = RegisterEncoding.SGPR0 + NumSGPRsMax - 1;
1518 TrackedWaitcntSet.clear();
1521 // Keep iterating over the blocks in reverse post order, inserting and
1522 // updating s_waitcnt where needed, until a fixed point is reached.
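// For example, if a loop back edge carries new pending events into the loop
// header, merging them into the header's incoming bracket marks that block
// dirty and the loop body is re-scanned with the merged state.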
1523 for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
1524 BlockInfos.insert({MBB, BlockInfo(MBB)});
1526 std::unique_ptr<WaitcntBrackets> Brackets;
1527 bool Modified = false;
1532 for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
1534 BlockInfo &BI = BII->second;
1540 Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
1542 *Brackets = *BI.Incoming;
1545 Brackets = std::make_unique<WaitcntBrackets>(ST);
1547 *Brackets = WaitcntBrackets(ST);
1550 Modified |= insertWaitcntInBlock(MF, *BI.MBB, *Brackets);
1553 if (Brackets->hasPending()) {
1554 BlockInfo *MoveBracketsToSucc = nullptr;
1555 for (MachineBasicBlock *Succ : BI.MBB->successors()) {
1556 auto SuccBII = BlockInfos.find(Succ);
1557 BlockInfo &SuccBI = SuccBII->second;
1558 if (!SuccBI.Incoming) {
1559 SuccBI.Dirty = true;
1562 if (!MoveBracketsToSucc) {
1563 MoveBracketsToSucc = &SuccBI;
1565 SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
1567 } else if (SuccBI.Incoming->merge(*Brackets)) {
1568 SuccBI.Dirty = true;
1573 if (MoveBracketsToSucc)
1574 MoveBracketsToSucc->Incoming = std::move(Brackets);
1579 SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
1581 bool HaveScalarStores = false;
1583 for (MachineFunction::iterator BI = MF.begin(), BE = MF.end(); BI != BE;
1585 MachineBasicBlock &MBB = *BI;
1587 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
1589 if (!HaveScalarStores && TII->isScalarStore(*I))
1590 HaveScalarStores = true;
1592 if (I->getOpcode() == AMDGPU::S_ENDPGM ||
1593 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
1594 EndPgmBlocks.push_back(&MBB);
1598 if (HaveScalarStores) {
1599 // If scalar writes are used, the cache must be flushed or else the next
1600 // wave to reuse the same scratch memory can be clobbered.
1602 // Insert s_dcache_wb at wave termination points if there were any scalar
1603 // stores, and only if the cache hasn't already been flushed. This could be
1604 // improved by looking across blocks for flushes in postdominating blocks
1605 // from the stores but an explicitly requested flush is probably very rare.
1606 for (MachineBasicBlock *MBB : EndPgmBlocks) {
1607 bool SeenDCacheWB = false;
1609 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
1611 if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
1612 SeenDCacheWB = true;
1613 else if (TII->isScalarStore(*I))
1614 SeenDCacheWB = false;
1616 // FIXME: It would be better to insert this before a waitcnt if any.
1617 if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
1618 I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
1621 BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
1627 if (!MFI->isEntryFunction()) {
1628 // Wait for any outstanding memory operations that the input registers may
1629 // depend on. We can't track them, and it's better to do the wait after the
1630 // costly call sequence.
1632 // TODO: Could insert earlier and schedule more liberally with operations
1633 // that only use caller preserved registers.
1634 MachineBasicBlock &EntryBB = MF.front();
1635 MachineBasicBlock::iterator I = EntryBB.begin();
1636 for (MachineBasicBlock::iterator E = EntryBB.end();
1637 I != E && (I->isPHI() || I->isMetaInstruction()); ++I)
1639 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
1641 BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT_VSCNT))
1642 .addReg(AMDGPU::SGPR_NULL, RegState::Undef)