1 //===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements hazard recognizers for scheduling on GCN processors.
12 //===----------------------------------------------------------------------===//
14 #include "GCNHazardRecognizer.h"
15 #include "AMDGPUSubtarget.h"
16 #include "SIDefines.h"
17 #include "SIInstrInfo.h"
18 #include "SIRegisterInfo.h"
19 #include "Utils/AMDGPUBaseInfo.h"
20 #include "llvm/ADT/iterator_range.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/CodeGen/MachineOperand.h"
24 #include "llvm/CodeGen/ScheduleDAG.h"
25 #include "llvm/MC/MCInstrDesc.h"
26 #include "llvm/Support/ErrorHandling.h"
35 //===----------------------------------------------------------------------===//
36 // Hazard Recognizer Implementation
37 //===----------------------------------------------------------------------===//
// Construct the hazard recognizer for a machine function: cache the SI
// subtarget and its instruction info. No instruction is in flight yet, so
// CurrCycleInstr starts null.
// NOTE(review): the initializer list and constructor body are partially
// elided in this view (e.g. the closing brace / lookahead setup).
39 GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
40 CurrCycleInstr(nullptr),
42 ST(MF.getSubtarget<SISubtarget>()),
43 TII(*ST.getInstrInfo()) {
// Scheduler callback (SUnit form): unwrap the SUnit and forward its
// MachineInstr to the MachineInstr overload.
47 void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
48 EmitInstruction(SU->getInstr());
// Record MI as the instruction issued in the current cycle.
// NOTE(review): the body is elided in this view; presumably it assigns
// CurrCycleInstr = MI — confirm against the full file.
51 void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
// True iff Opcode is one of the v_div_fmas variants (f32 or f64).
55 static bool isDivFMas(unsigned Opcode) {
56 return Opcode == AMDGPU::V_DIV_FMAS_F32 || Opcode == AMDGPU::V_DIV_FMAS_F64;
// True iff Opcode is s_getreg_b32.
59 static bool isSGetReg(unsigned Opcode) {
60 return Opcode == AMDGPU::S_GETREG_B32;
// True iff Opcode is either s_setreg form (register source or inline
// immediate source).
63 static bool isSSetReg(unsigned Opcode) {
64 return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32;
// True iff Opcode is v_readlane_b32 or v_writelane_b32.
67 static bool isRWLane(unsigned Opcode) {
68 return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
// True iff Opcode is s_rfe_b64 (return from exception).
71 static bool isRFE(unsigned Opcode) {
72 return Opcode == AMDGPU::S_RFE_B64;
// True iff Opcode is one of the s_movrels/s_movreld family (32- and 64-bit
// variants), i.e. the scalar relative-move instructions that read M0.
// NOTE(review): the enclosing switch header, the true return for these
// cases, and the default case are elided in this view.
75 static bool isSMovRel(unsigned Opcode) {
77 case AMDGPU::S_MOVRELS_B32:
78 case AMDGPU::S_MOVRELS_B64:
79 case AMDGPU::S_MOVRELD_B32:
80 case AMDGPU::S_MOVRELD_B64:
// Extract the hardware-register ID from the simm16 operand of a
// s_getreg/s_setreg-style instruction, masking the immediate down to the
// ID field (ID_MASK_) so size/offset bits are ignored.
87 static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
88 const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
89 AMDGPU::OpName::simm16);
90 return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
// Top-down scheduler query: classify the hazard state of SU's instruction.
// Each per-class check returns the number of wait states still required;
// a positive count means issuing MI now would violate a hardware hazard.
// NOTE(review): the return statements between the checks (presumably
// returning NoopHazard / NoHazard) are elided in this view.
93 ScheduleHazardRecognizer::HazardType
94 GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
95 MachineInstr *MI = SU->getInstr();
97 if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
100 if (SIInstrInfo::isVMEM(*MI) && checkVMEMHazards(MI) > 0)
103 if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
106 if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
109 if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
112 if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
115 if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
118 if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
121 if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
// VINTRP and s_movrel* both read M0, hence the shared read-M0 check.
124 if ((TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode())) &&
125 checkReadM0Hazards(MI) > 0)
128 if (checkAnyInstHazards(MI) > 0)
// Scheduler callback (SUnit form): forward to the MachineInstr overload.
134 unsigned GCNHazardRecognizer::PreEmitNoops(SUnit *SU) {
135 return PreEmitNoops(SU->getInstr());
// Compute how many wait states (no-ops) must be emitted before MI so that
// every hazard that applies to it has cleared. Starts from the hazards
// that apply to any instruction, then takes the max over the class-specific
// checks. SMRD, s_getreg, s_setreg, s_rfe, and VINTRP/s_movrel* return
// immediately because no further checks apply to them.
138 unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
139 int WaitStates = std::max(0, checkAnyInstHazards(MI));
141 if (SIInstrInfo::isSMRD(*MI))
142 return std::max(WaitStates, checkSMRDHazards(MI));
// VALU instructions can additionally be VMEM, DPP, div_fmas, read/write
// lane, or VINTRP encodings, so all of those checks are folded in here.
144 if (SIInstrInfo::isVALU(*MI)) {
145 WaitStates = std::max(WaitStates, checkVALUHazards(MI));
147 if (SIInstrInfo::isVMEM(*MI))
148 WaitStates = std::max(WaitStates, checkVMEMHazards(MI));
150 if (SIInstrInfo::isDPP(*MI))
151 WaitStates = std::max(WaitStates, checkDPPHazards(MI));
153 if (isDivFMas(MI->getOpcode()))
154 WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));
156 if (isRWLane(MI->getOpcode()))
157 WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));
159 if (TII.isVINTRP(*MI))
160 WaitStates = std::max(WaitStates, checkReadM0Hazards(MI));
165 if (isSGetReg(MI->getOpcode()))
166 return std::max(WaitStates, checkGetRegHazards(MI));
168 if (isSSetReg(MI->getOpcode()))
169 return std::max(WaitStates, checkSetRegHazards(MI));
171 if (isRFE(MI->getOpcode()))
172 return std::max(WaitStates, checkRFEHazards(MI));
174 if (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode()))
175 return std::max(WaitStates, checkReadM0Hazards(MI));
// A no-op consumes one cycle: record it as a null entry in the
// emitted-instruction history.
180 void GCNHazardRecognizer::EmitNoop() {
181 EmittedInstrs.push_front(nullptr);
// Advance the recognizer's model of the pipeline by one cycle: push the
// instruction issued this cycle onto the history, followed by one nullptr
// per additional wait state it occupies, then trim the history to the
// lookahead window.
184 void GCNHazardRecognizer::AdvanceCycle() {
185 // When the scheduler detects a stall, it will call AdvanceCycle() without
186 // emitting any instructions.
// NOTE(review): the stall-handling guard for a null CurrCycleInstr is
// elided in this view — confirm against the full file.
190 unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);
192 // Keep track of emitted instructions
193 EmittedInstrs.push_front(CurrCycleInstr);
195 // Add a nullptr for each additional wait state after the first. Make sure
196 // not to add more than getMaxLookAhead() items to the list, since we
197 // truncate the list to that size right after this loop.
198 for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
200 EmittedInstrs.push_front(nullptr);
203 // getMaxLookahead() is the largest number of wait states we will ever need
204 // to insert, so there is no point in keeping track of more than that many
206 EmittedInstrs.resize(getMaxLookAhead());
// The instruction for this cycle has been consumed.
208 CurrCycleInstr = nullptr;
// This recognizer only models top-down scheduling; bottom-up is unsupported.
211 void GCNHazardRecognizer::RecedeCycle() {
212 llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
215 //===----------------------------------------------------------------------===//
217 //===----------------------------------------------------------------------===//
// Walk the emitted-instruction history (most recent first) looking for an
// instruction matching IsHazard. DBG_VALUE and IMPLICIT_DEF do not consume
// a wait state and are skipped. If nothing in the lookahead window matches,
// return INT_MAX, i.e. "arbitrarily long ago".
// NOTE(review): the wait-state counter increment and the match-return are
// elided in this view.
219 int GCNHazardRecognizer::getWaitStatesSince(
220 function_ref<bool(MachineInstr *)> IsHazard) {
222 for (MachineInstr *MI : EmittedInstrs) {
227 unsigned Opcode = MI->getOpcode();
228 if (Opcode == AMDGPU::DBG_VALUE || Opcode == AMDGPU::IMPLICIT_DEF)
233 return std::numeric_limits<int>::max();
// Wait states since the most recent instruction that both satisfies
// IsHazardDef and modifies Reg (the register-write filter is composed on
// top of the caller-supplied predicate).
236 int GCNHazardRecognizer::getWaitStatesSinceDef(
237 unsigned Reg, function_ref<bool(MachineInstr *)> IsHazardDef) {
238 const SIRegisterInfo *TRI = ST.getRegisterInfo();
240 auto IsHazardFn = [IsHazardDef, TRI, Reg] (MachineInstr *MI) {
241 return IsHazardDef(MI) && MI->modifiesRegister(Reg, TRI);
244 return getWaitStatesSince(IsHazardFn);
// Wait states since the most recent s_setreg that satisfies IsHazard.
247 int GCNHazardRecognizer::getWaitStatesSinceSetReg(
248 function_ref<bool(MachineInstr *)> IsHazard) {
249 auto IsHazardFn = [IsHazard] (MachineInstr *MI) {
250 return isSSetReg(MI->getOpcode()) && IsHazard(MI);
253 return getWaitStatesSince(IsHazardFn);
256 //===----------------------------------------------------------------------===//
257 // No-op Hazard Detection
258 //===----------------------------------------------------------------------===//
// Insert the register of every register operand in Ops into Set.
// NOTE(review): the Op.isReg() guard preceding the insert is elided in
// this view — confirm against the full file.
260 static void addRegsToSet(iterator_range<MachineInstr::const_mop_iterator> Ops,
261 std::set<unsigned> &Set) {
262 for (const MachineOperand &Op : Ops) {
264 Set.insert(Op.getReg());
// Detect a break-the-clause hazard for an incoming SMEM instruction: if
// adding SMEM to the current run of consecutive SMRD instructions would
// make some instruction's def overlap another's use, wait states must be
// inserted to split the clause.
268 int GCNHazardRecognizer::checkSMEMSoftClauseHazards(MachineInstr *SMEM) {
269 // SMEM soft clause are only present on VI+
270 if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
273 // A soft-clause is any group of consecutive SMEM instructions. The
274 // instructions in this group may return out of order and/or may be
275 // replayed (i.e. the same instruction issued more than once).
277 // In order to handle these situations correctly we need to make sure
278 // that when a clause has more than one instruction, no instruction in the
279 // clause writes to a register that is read by another instruction in the clause
280 // (including itself). If we encounter this situation, we need to break the
281 // clause by inserting a non SMEM instruction.
283 std::set<unsigned> ClauseDefs;
284 std::set<unsigned> ClauseUses;
// Gather defs/uses of the in-flight clause from the history.
286 for (MachineInstr *MI : EmittedInstrs) {
288 // When we hit a non-SMEM instruction then we have passed the start of the
289 // clause and we can stop.
290 if (!MI || !SIInstrInfo::isSMRD(*MI))
293 addRegsToSet(MI->defs(), ClauseDefs);
294 addRegsToSet(MI->uses(), ClauseUses);
// No prior defs means no possible overlap; SMEM is not in a clause that
// can conflict.
297 if (ClauseDefs.empty())
300 // FIXME: When we support stores, we need to make sure not to put loads and
301 // stores in the same clause if they use the same address. For now, just
302 // start a new clause whenever we see a store.
303 if (SMEM->mayStore())
// Include the incoming instruction itself in the overlap test.
306 addRegsToSet(SMEM->defs(), ClauseDefs);
307 addRegsToSet(SMEM->uses(), ClauseUses);
309 std::vector<unsigned> Result(std::max(ClauseDefs.size(), ClauseUses.size()));
310 std::vector<unsigned>::iterator End;
312 End = std::set_intersection(ClauseDefs.begin(), ClauseDefs.end(),
313 ClauseUses.begin(), ClauseUses.end(), Result.begin());
315 // If the set of defs and uses intersect then we cannot add this instruction
316 // to the clause, so we have a hazard.
317 if (End != Result.begin())
// Wait states required before an SMRD instruction: the SMEM soft-clause
// hazard (all targets that have it), plus on SI a 4-wait-state delay for
// any SGPR operand recently written by a VALU instruction.
323 int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
324 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
325 int WaitStatesNeeded = 0;
327 WaitStatesNeeded = checkSMEMSoftClauseHazards(SMRD);
329 // This SMRD hazard only affects SI.
330 if (ST.getGeneration() != SISubtarget::SOUTHERN_ISLANDS)
331 return WaitStatesNeeded;
333 // A read of an SGPR by SMRD instruction requires 4 wait states when the
334 // SGPR was written by a VALU instruction.
335 int SmrdSgprWaitStates = 4;
336 auto IsHazardDefFn = [this] (MachineInstr *MI) { return TII.isVALU(*MI); };
// NOTE(review): the isReg() filter on Use is elided in this view.
338 for (const MachineOperand &Use : SMRD->uses()) {
341 int WaitStatesNeededForUse =
342 SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
343 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
345 return WaitStatesNeeded;
// Wait states required before a VMEM instruction on VI+: 5 wait states for
// any non-VGPR (i.e. scalar) operand recently written by a VALU instruction.
348 int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
349 const SIInstrInfo *TII = ST.getInstrInfo();
351 if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
354 const SIRegisterInfo &TRI = TII->getRegisterInfo();
356 // A read of an SGPR by a VMEM instruction requires 5 wait states when the
357 // SGPR was written by a VALU Instruction.
358 int VmemSgprWaitStates = 5;
359 int WaitStatesNeeded = 0;
360 auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
362 for (const MachineOperand &Use : VMEM->uses()) {
// Skip non-register operands and VGPR reads; only scalar reads hazard.
363 if (!Use.isReg() || TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
366 int WaitStatesNeededForUse =
367 VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
368 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
370 return WaitStatesNeeded;
// Wait states required before a DPP instruction: 2 wait states for any VGPR
// operand written by a recent preceding instruction.
373 int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
374 const SIRegisterInfo *TRI = ST.getRegisterInfo();
376 // Check for DPP VGPR read after VALU VGPR write.
377 int DppVgprWaitStates = 2;
378 int WaitStatesNeeded = 0;
380 for (const MachineOperand &Use : DPP->uses()) {
381 if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
383 int WaitStatesNeededForUse =
384 DppVgprWaitStates - getWaitStatesSinceDef(Use.getReg());
385 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
388 return WaitStatesNeeded;
// Wait states required before v_div_fmas: 4 wait states after VCC was
// written by a VALU instruction. A result <= 0 means no hazard remains.
391 int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
392 const SIInstrInfo *TII = ST.getInstrInfo();
394 // v_div_fmas requires 4 wait states after a write to vcc from a VALU
396 const int DivFMasWaitStates = 4;
397 auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
398 int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn);
400 return DivFMasWaitStates - WaitStatesNeeded;
// Wait states required before s_getreg_b32: 2 wait states after an s_setreg
// targeting the same hardware register ID.
403 int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
404 const SIInstrInfo *TII = ST.getInstrInfo();
405 unsigned GetRegHWReg = getHWReg(TII, *GetRegInstr);
407 const int GetRegWaitStates = 2;
408 auto IsHazardFn = [TII, GetRegHWReg] (MachineInstr *MI) {
409 return GetRegHWReg == getHWReg(TII, *MI);
411 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
413 return GetRegWaitStates - WaitStatesNeeded;
// Wait states required before s_setreg: 1 (SI/CI) or 2 (VI+) wait states
// after a prior s_setreg targeting the same hardware register ID.
416 int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
417 const SIInstrInfo *TII = ST.getInstrInfo();
418 unsigned HWReg = getHWReg(TII, *SetRegInstr);
420 const int SetRegWaitStates =
421 ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ? 1 : 2;
422 auto IsHazardFn = [TII, HWReg] (MachineInstr *MI) {
423 return HWReg == getHWReg(TII, *MI);
425 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
426 return SetRegWaitStates - WaitStatesNeeded;
// Decide whether MI is a memory instruction whose store data can be
// clobbered by a following VALU write (the 12-dword-store hazard). The
// visible logic inspects the vdata/data operand's register class width for
// MUBUF/MTBUF, MIMG, and FLAT encodings.
// NOTE(review): the return statements (presumably returning the data
// operand index on hazard, -1 otherwise) are elided in this view.
429 int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
433 const SIInstrInfo *TII = ST.getInstrInfo();
434 unsigned Opcode = MI.getOpcode();
435 const MCInstrDesc &Desc = MI.getDesc();
437 int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
440 VDataRCID = Desc.OpInfo[VDataIdx].RegClass;
442 if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
443 // There is no hazard if the instruction does not use vector regs
447 // For MUBUF/MTBUF instructions this hazard only exists if the
448 // instruction is not using a register in the soffset field.
449 const MachineOperand *SOffset =
450 TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
451 // If we have no soffset operand, then assume this field has been
452 // hardcoded to zero.
453 if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
454 (!SOffset || !SOffset->isReg()))
458 // MIMG instructions create a hazard if they don't use a 256-bit T# and
459 // the store size is greater than 8 bytes and they have more than two bits
460 // of their dmask set.
461 // All our MIMG definitions use a 256-bit T#, so we can skip checking for them.
462 if (TII->isMIMG(MI)) {
463 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
464 assert(SRsrcIdx != -1 &&
465 AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
469 if (TII->isFLAT(MI)) {
470 int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
471 if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
// Wait states required before a VALU instruction on subtargets with the
// 12-dword-store hazard: 1 wait state if the VALU writes a VGPR that
// overlaps the store-data operand of a recent wide VMEM store.
478 int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
479 // This checks for the hazard where VMEM instructions that store more than
480 // 8 bytes can have their store data overwritten by the next instruction.
481 if (!ST.has12DWordStoreHazard())
484 const SIRegisterInfo *TRI = ST.getRegisterInfo();
485 const MachineRegisterInfo &MRI = VALU->getParent()->getParent()->getRegInfo();
487 const int VALUWaitStates = 1;
488 int WaitStatesNeeded = 0;
490 for (const MachineOperand &Def : VALU->defs()) {
491 if (!TRI->isVGPR(MRI, Def.getReg()))
493 unsigned Reg = Def.getReg();
// Hazard if a recent instruction's store-data operand overlaps this def.
494 auto IsHazardFn = [this, Reg, TRI] (MachineInstr *MI) {
495 int DataIdx = createsVALUHazard(*MI);
496 return DataIdx >= 0 &&
497 TRI->regsOverlap(MI->getOperand(DataIdx).getReg(), Reg);
499 int WaitStatesNeededForDef =
500 VALUWaitStates - getWaitStatesSince(IsHazardFn);
501 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
503 return WaitStatesNeeded;
// Wait states required before v_readlane/v_writelane: 4 wait states when
// the SGPR lane-select operand (src1) was recently written by a VALU
// instruction.
506 int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
507 const SIInstrInfo *TII = ST.getInstrInfo();
508 const SIRegisterInfo *TRI = ST.getRegisterInfo();
509 const MachineRegisterInfo &MRI =
510 RWLane->getParent()->getParent()->getRegInfo();
512 const MachineOperand *LaneSelectOp =
513 TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);
// A non-register or non-SGPR lane select cannot trigger this hazard.
515 if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
518 unsigned LaneSelectReg = LaneSelectOp->getReg();
519 auto IsHazardFn = [TII] (MachineInstr *MI) {
520 return TII->isVALU(*MI);
523 const int RWLaneWaitStates = 4;
524 int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn);
525 return RWLaneWaitStates - WaitStatesSince;
// Wait states required before s_rfe_b64 on VI+: 1 wait state after an
// s_setreg that wrote the TRAPSTS hardware register.
528 int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
529 if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
532 const SIInstrInfo *TII = ST.getInstrInfo();
534 const int RFEWaitStates = 1;
536 auto IsHazardFn = [TII] (MachineInstr *MI) {
537 return getHWReg(TII, *MI) == AMDGPU::Hwreg::ID_TRAPSTS;
539 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
540 return RFEWaitStates - WaitStatesNeeded;
// Hazard check that applies to every instruction: on subtargets with the
// s_mov_fed hazard, a read of an SGPR needs 1 wait state after that SGPR
// was written by s_mov_fed_b32. Debug values never hazard.
543 int GCNHazardRecognizer::checkAnyInstHazards(MachineInstr *MI) {
544 if (MI->isDebugValue())
547 const SIRegisterInfo *TRI = ST.getRegisterInfo();
548 if (!ST.hasSMovFedHazard())
551 // Check for any instruction reading an SGPR after a write from
553 int MovFedWaitStates = 1;
554 int WaitStatesNeeded = 0;
556 for (const MachineOperand &Use : MI->uses()) {
// Only scalar register reads are subject to this hazard.
557 if (!Use.isReg() || TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
559 auto IsHazardFn = [] (MachineInstr *MI) {
560 return MI->getOpcode() == AMDGPU::S_MOV_FED_B32;
562 int WaitStatesNeededForUse =
563 MovFedWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardFn);
564 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
567 return WaitStatesNeeded;
// Wait states required before an instruction that reads M0 (VINTRP or
// s_movrel*) on subtargets with the read-M0 hazard: 1 wait state after M0
// was written by a SALU instruction.
570 int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
571 if (!ST.hasReadM0Hazard())
574 const SIInstrInfo *TII = ST.getInstrInfo();
575 int SMovRelWaitStates = 1;
576 auto IsHazardFn = [TII] (MachineInstr *MI) {
577 return TII->isSALU(*MI);
579 return SMovRelWaitStates - getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn);