1 //===-- GCNHazardRecognizers.cpp - GCN Hazard Recognizer Impls ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements hazard recognizers for scheduling on GCN processors.
12 //===----------------------------------------------------------------------===//
#include "GCNHazardRecognizer.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <limits>
#include <set>
#include <vector>

using namespace llvm;
//===----------------------------------------------------------------------===//
// Hazard Recognizer Implementation
//===----------------------------------------------------------------------===//
39 GCNHazardRecognizer::GCNHazardRecognizer(const MachineFunction &MF) :
40 CurrCycleInstr(nullptr),
42 ST(MF.getSubtarget<SISubtarget>()),
43 TII(*ST.getInstrInfo()) {
47 void GCNHazardRecognizer::EmitInstruction(SUnit *SU) {
48 EmitInstruction(SU->getInstr());
51 void GCNHazardRecognizer::EmitInstruction(MachineInstr *MI) {
55 static bool isDivFMas(unsigned Opcode) {
56 return Opcode == AMDGPU::V_DIV_FMAS_F32 || Opcode == AMDGPU::V_DIV_FMAS_F64;
59 static bool isSGetReg(unsigned Opcode) {
60 return Opcode == AMDGPU::S_GETREG_B32;
63 static bool isSSetReg(unsigned Opcode) {
64 return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32;
67 static bool isRWLane(unsigned Opcode) {
68 return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32;
71 static bool isRFE(unsigned Opcode) {
72 return Opcode == AMDGPU::S_RFE_B64;
75 static bool isSMovRel(unsigned Opcode) {
77 case AMDGPU::S_MOVRELS_B32:
78 case AMDGPU::S_MOVRELS_B64:
79 case AMDGPU::S_MOVRELD_B32:
80 case AMDGPU::S_MOVRELD_B64:
87 static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
88 const MachineOperand *RegOp = TII->getNamedOperand(RegInstr,
89 AMDGPU::OpName::simm16);
90 return RegOp->getImm() & AMDGPU::Hwreg::ID_MASK_;
93 ScheduleHazardRecognizer::HazardType
94 GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
95 MachineInstr *MI = SU->getInstr();
97 if (SIInstrInfo::isSMRD(*MI) && checkSMRDHazards(MI) > 0)
100 if (SIInstrInfo::isVMEM(*MI) && checkVMEMHazards(MI) > 0)
103 if (SIInstrInfo::isVALU(*MI) && checkVALUHazards(MI) > 0)
106 if (SIInstrInfo::isDPP(*MI) && checkDPPHazards(MI) > 0)
109 if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0)
112 if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0)
115 if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0)
118 if (isSSetReg(MI->getOpcode()) && checkSetRegHazards(MI) > 0)
121 if (isRFE(MI->getOpcode()) && checkRFEHazards(MI) > 0)
124 if ((TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode())) &&
125 checkReadM0Hazards(MI) > 0)
128 if (checkAnyInstHazards(MI) > 0)
134 unsigned GCNHazardRecognizer::PreEmitNoops(SUnit *SU) {
135 return PreEmitNoops(SU->getInstr());
138 unsigned GCNHazardRecognizer::PreEmitNoops(MachineInstr *MI) {
139 int WaitStates = std::max(0, checkAnyInstHazards(MI));
141 if (SIInstrInfo::isSMRD(*MI))
142 return std::max(WaitStates, checkSMRDHazards(MI));
144 if (SIInstrInfo::isVALU(*MI)) {
145 WaitStates = std::max(WaitStates, checkVALUHazards(MI));
147 if (SIInstrInfo::isVMEM(*MI))
148 WaitStates = std::max(WaitStates, checkVMEMHazards(MI));
150 if (SIInstrInfo::isDPP(*MI))
151 WaitStates = std::max(WaitStates, checkDPPHazards(MI));
153 if (isDivFMas(MI->getOpcode()))
154 WaitStates = std::max(WaitStates, checkDivFMasHazards(MI));
156 if (isRWLane(MI->getOpcode()))
157 WaitStates = std::max(WaitStates, checkRWLaneHazards(MI));
159 if (TII.isVINTRP(*MI))
160 WaitStates = std::max(WaitStates, checkReadM0Hazards(MI));
165 if (isSGetReg(MI->getOpcode()))
166 return std::max(WaitStates, checkGetRegHazards(MI));
168 if (isSSetReg(MI->getOpcode()))
169 return std::max(WaitStates, checkSetRegHazards(MI));
171 if (isRFE(MI->getOpcode()))
172 return std::max(WaitStates, checkRFEHazards(MI));
174 if (TII.isVINTRP(*MI) || isSMovRel(MI->getOpcode()))
175 return std::max(WaitStates, checkReadM0Hazards(MI));
180 void GCNHazardRecognizer::EmitNoop() {
181 EmittedInstrs.push_front(nullptr);
184 void GCNHazardRecognizer::AdvanceCycle() {
185 // When the scheduler detects a stall, it will call AdvanceCycle() without
186 // emitting any instructions.
190 unsigned NumWaitStates = TII.getNumWaitStates(*CurrCycleInstr);
192 // Keep track of emitted instructions
193 EmittedInstrs.push_front(CurrCycleInstr);
195 // Add a nullptr for each additional wait state after the first. Make sure
196 // not to add more than getMaxLookAhead() items to the list, since we
197 // truncate the list to that size right after this loop.
198 for (unsigned i = 1, e = std::min(NumWaitStates, getMaxLookAhead());
200 EmittedInstrs.push_front(nullptr);
203 // getMaxLookahead() is the largest number of wait states we will ever need
204 // to insert, so there is no point in keeping track of more than that many
206 EmittedInstrs.resize(getMaxLookAhead());
208 CurrCycleInstr = nullptr;
211 void GCNHazardRecognizer::RecedeCycle() {
212 llvm_unreachable("hazard recognizer does not support bottom-up scheduling.");
//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//
219 int GCNHazardRecognizer::getWaitStatesSince(
220 function_ref<bool(MachineInstr *)> IsHazard) {
222 for (MachineInstr *MI : EmittedInstrs) {
224 if (!MI || !IsHazard(MI))
228 return std::numeric_limits<int>::max();
231 int GCNHazardRecognizer::getWaitStatesSinceDef(
232 unsigned Reg, function_ref<bool(MachineInstr *)> IsHazardDef) {
233 const SIRegisterInfo *TRI = ST.getRegisterInfo();
235 auto IsHazardFn = [IsHazardDef, TRI, Reg] (MachineInstr *MI) {
236 return IsHazardDef(MI) && MI->modifiesRegister(Reg, TRI);
239 return getWaitStatesSince(IsHazardFn);
242 int GCNHazardRecognizer::getWaitStatesSinceSetReg(
243 function_ref<bool(MachineInstr *)> IsHazard) {
244 auto IsHazardFn = [IsHazard] (MachineInstr *MI) {
245 return isSSetReg(MI->getOpcode()) && IsHazard(MI);
248 return getWaitStatesSince(IsHazardFn);
251 //===----------------------------------------------------------------------===//
252 // No-op Hazard Detection
253 //===----------------------------------------------------------------------===//
255 static void addRegsToSet(iterator_range<MachineInstr::const_mop_iterator> Ops,
256 std::set<unsigned> &Set) {
257 for (const MachineOperand &Op : Ops) {
259 Set.insert(Op.getReg());
263 int GCNHazardRecognizer::checkSMEMSoftClauseHazards(MachineInstr *SMEM) {
264 // SMEM soft clause are only present on VI+
265 if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
268 // A soft-clause is any group of consecutive SMEM instructions. The
269 // instructions in this group may return out of order and/or may be
270 // replayed (i.e. the same instruction issued more than once).
272 // In order to handle these situations correctly we need to make sure
273 // that when a clause has more than one instruction, no instruction in the
274 // clause writes to a register that is read another instruction in the clause
275 // (including itself). If we encounter this situaion, we need to break the
276 // clause by inserting a non SMEM instruction.
278 std::set<unsigned> ClauseDefs;
279 std::set<unsigned> ClauseUses;
281 for (MachineInstr *MI : EmittedInstrs) {
283 // When we hit a non-SMEM instruction then we have passed the start of the
284 // clause and we can stop.
285 if (!MI || !SIInstrInfo::isSMRD(*MI))
288 addRegsToSet(MI->defs(), ClauseDefs);
289 addRegsToSet(MI->uses(), ClauseUses);
292 if (ClauseDefs.empty())
295 // FIXME: When we support stores, we need to make sure not to put loads and
296 // stores in the same clause if they use the same address. For now, just
297 // start a new clause whenever we see a store.
298 if (SMEM->mayStore())
301 addRegsToSet(SMEM->defs(), ClauseDefs);
302 addRegsToSet(SMEM->uses(), ClauseUses);
304 std::vector<unsigned> Result(std::max(ClauseDefs.size(), ClauseUses.size()));
305 std::vector<unsigned>::iterator End;
307 End = std::set_intersection(ClauseDefs.begin(), ClauseDefs.end(),
308 ClauseUses.begin(), ClauseUses.end(), Result.begin());
310 // If the set of defs and uses intersect then we cannot add this instruction
311 // to the clause, so we have a hazard.
312 if (End != Result.begin())
318 int GCNHazardRecognizer::checkSMRDHazards(MachineInstr *SMRD) {
319 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
320 int WaitStatesNeeded = 0;
322 WaitStatesNeeded = checkSMEMSoftClauseHazards(SMRD);
324 // This SMRD hazard only affects SI.
325 if (ST.getGeneration() != SISubtarget::SOUTHERN_ISLANDS)
326 return WaitStatesNeeded;
328 // A read of an SGPR by SMRD instruction requires 4 wait states when the
329 // SGPR was written by a VALU instruction.
330 int SmrdSgprWaitStates = 4;
331 auto IsHazardDefFn = [this] (MachineInstr *MI) { return TII.isVALU(*MI); };
333 for (const MachineOperand &Use : SMRD->uses()) {
336 int WaitStatesNeededForUse =
337 SmrdSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
338 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
340 return WaitStatesNeeded;
343 int GCNHazardRecognizer::checkVMEMHazards(MachineInstr* VMEM) {
344 const SIInstrInfo *TII = ST.getInstrInfo();
346 if (ST.getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
349 const SIRegisterInfo &TRI = TII->getRegisterInfo();
351 // A read of an SGPR by a VMEM instruction requires 5 wait states when the
352 // SGPR was written by a VALU Instruction.
353 int VmemSgprWaitStates = 5;
354 int WaitStatesNeeded = 0;
355 auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
357 for (const MachineOperand &Use : VMEM->uses()) {
358 if (!Use.isReg() || TRI.isVGPR(MF.getRegInfo(), Use.getReg()))
361 int WaitStatesNeededForUse =
362 VmemSgprWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardDefFn);
363 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
365 return WaitStatesNeeded;
368 int GCNHazardRecognizer::checkDPPHazards(MachineInstr *DPP) {
369 const SIRegisterInfo *TRI = ST.getRegisterInfo();
371 // Check for DPP VGPR read after VALU VGPR write.
372 int DppVgprWaitStates = 2;
373 int WaitStatesNeeded = 0;
375 for (const MachineOperand &Use : DPP->uses()) {
376 if (!Use.isReg() || !TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
378 int WaitStatesNeededForUse =
379 DppVgprWaitStates - getWaitStatesSinceDef(Use.getReg());
380 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
383 return WaitStatesNeeded;
386 int GCNHazardRecognizer::checkDivFMasHazards(MachineInstr *DivFMas) {
387 const SIInstrInfo *TII = ST.getInstrInfo();
389 // v_div_fmas requires 4 wait states after a write to vcc from a VALU
391 const int DivFMasWaitStates = 4;
392 auto IsHazardDefFn = [TII] (MachineInstr *MI) { return TII->isVALU(*MI); };
393 int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn);
395 return DivFMasWaitStates - WaitStatesNeeded;
398 int GCNHazardRecognizer::checkGetRegHazards(MachineInstr *GetRegInstr) {
399 const SIInstrInfo *TII = ST.getInstrInfo();
400 unsigned GetRegHWReg = getHWReg(TII, *GetRegInstr);
402 const int GetRegWaitStates = 2;
403 auto IsHazardFn = [TII, GetRegHWReg] (MachineInstr *MI) {
404 return GetRegHWReg == getHWReg(TII, *MI);
406 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
408 return GetRegWaitStates - WaitStatesNeeded;
411 int GCNHazardRecognizer::checkSetRegHazards(MachineInstr *SetRegInstr) {
412 const SIInstrInfo *TII = ST.getInstrInfo();
413 unsigned HWReg = getHWReg(TII, *SetRegInstr);
415 const int SetRegWaitStates =
416 ST.getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS ? 1 : 2;
417 auto IsHazardFn = [TII, HWReg] (MachineInstr *MI) {
418 return HWReg == getHWReg(TII, *MI);
420 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
421 return SetRegWaitStates - WaitStatesNeeded;
424 int GCNHazardRecognizer::createsVALUHazard(const MachineInstr &MI) {
428 const SIInstrInfo *TII = ST.getInstrInfo();
429 unsigned Opcode = MI.getOpcode();
430 const MCInstrDesc &Desc = MI.getDesc();
432 int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
435 VDataRCID = Desc.OpInfo[VDataIdx].RegClass;
437 if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) {
438 // There is no hazard if the instruction does not use vector regs
442 // For MUBUF/MTBUF instructions this hazard only exists if the
443 // instruction is not using a register in the soffset field.
444 const MachineOperand *SOffset =
445 TII->getNamedOperand(MI, AMDGPU::OpName::soffset);
446 // If we have no soffset operand, then assume this field has been
447 // hardcoded to zero.
448 if (AMDGPU::getRegBitWidth(VDataRCID) > 64 &&
449 (!SOffset || !SOffset->isReg()))
453 // MIMG instructions create a hazard if they don't use a 256-bit T# and
454 // the store size is greater than 8 bytes and they have more than two bits
455 // of their dmask set.
456 // All our MIMG definitions use a 256-bit T#, so we can skip checking for them.
457 if (TII->isMIMG(MI)) {
458 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
459 assert(SRsrcIdx != -1 &&
460 AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256);
464 if (TII->isFLAT(MI)) {
465 int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
466 if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
473 int GCNHazardRecognizer::checkVALUHazards(MachineInstr *VALU) {
474 // This checks for the hazard where VMEM instructions that store more than
475 // 8 bytes can have there store data over written by the next instruction.
476 if (!ST.has12DWordStoreHazard())
479 const SIRegisterInfo *TRI = ST.getRegisterInfo();
480 const MachineRegisterInfo &MRI = VALU->getParent()->getParent()->getRegInfo();
482 const int VALUWaitStates = 1;
483 int WaitStatesNeeded = 0;
485 for (const MachineOperand &Def : VALU->defs()) {
486 if (!TRI->isVGPR(MRI, Def.getReg()))
488 unsigned Reg = Def.getReg();
489 auto IsHazardFn = [this, Reg, TRI] (MachineInstr *MI) {
490 int DataIdx = createsVALUHazard(*MI);
491 return DataIdx >= 0 &&
492 TRI->regsOverlap(MI->getOperand(DataIdx).getReg(), Reg);
494 int WaitStatesNeededForDef =
495 VALUWaitStates - getWaitStatesSince(IsHazardFn);
496 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForDef);
498 return WaitStatesNeeded;
501 int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) {
502 const SIInstrInfo *TII = ST.getInstrInfo();
503 const SIRegisterInfo *TRI = ST.getRegisterInfo();
504 const MachineRegisterInfo &MRI =
505 RWLane->getParent()->getParent()->getRegInfo();
507 const MachineOperand *LaneSelectOp =
508 TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1);
510 if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg()))
513 unsigned LaneSelectReg = LaneSelectOp->getReg();
514 auto IsHazardFn = [TII] (MachineInstr *MI) {
515 return TII->isVALU(*MI);
518 const int RWLaneWaitStates = 4;
519 int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn);
520 return RWLaneWaitStates - WaitStatesSince;
523 int GCNHazardRecognizer::checkRFEHazards(MachineInstr *RFE) {
524 if (ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS)
527 const SIInstrInfo *TII = ST.getInstrInfo();
529 const int RFEWaitStates = 1;
531 auto IsHazardFn = [TII] (MachineInstr *MI) {
532 return getHWReg(TII, *MI) == AMDGPU::Hwreg::ID_TRAPSTS;
534 int WaitStatesNeeded = getWaitStatesSinceSetReg(IsHazardFn);
535 return RFEWaitStates - WaitStatesNeeded;
538 int GCNHazardRecognizer::checkAnyInstHazards(MachineInstr *MI) {
539 if (MI->isDebugValue())
542 const SIRegisterInfo *TRI = ST.getRegisterInfo();
543 if (!ST.hasSMovFedHazard())
546 // Check for any instruction reading an SGPR after a write from
548 int MovFedWaitStates = 1;
549 int WaitStatesNeeded = 0;
551 for (const MachineOperand &Use : MI->uses()) {
552 if (!Use.isReg() || TRI->isVGPR(MF.getRegInfo(), Use.getReg()))
554 auto IsHazardFn = [] (MachineInstr *MI) {
555 return MI->getOpcode() == AMDGPU::S_MOV_FED_B32;
557 int WaitStatesNeededForUse =
558 MovFedWaitStates - getWaitStatesSinceDef(Use.getReg(), IsHazardFn);
559 WaitStatesNeeded = std::max(WaitStatesNeeded, WaitStatesNeededForUse);
562 return WaitStatesNeeded;
565 int GCNHazardRecognizer::checkReadM0Hazards(MachineInstr *MI) {
566 if (!ST.hasReadM0Hazard())
569 const SIInstrInfo *TII = ST.getInstrInfo();
570 int SMovRelWaitStates = 1;
571 auto IsHazardFn = [TII] (MachineInstr *MI) {
572 return TII->isSALU(*MI);
574 return SMovRelWaitStates - getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn);