1 //===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// \brief SI Implementation of TargetInstrInfo.
13 //===----------------------------------------------------------------------===//
15 #include "SIInstrInfo.h"
16 #include "AMDGPUTargetMachine.h"
17 #include "GCNHazardRecognizer.h"
18 #include "SIDefines.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/ScheduleDAG.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/CodeGen/RegisterScavenging.h"
26 #include "llvm/MC/MCInstrDesc.h"
27 #include "llvm/Support/Debug.h"
31 // Must be at least 4 to be able to branch over minimum unconditional branch
32 // code. This is only for making it possible to write reasonably small tests for
33 // long branches.
34 static cl::opt<unsigned>
35 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
36 cl::desc("Restrict range of branch instructions (DEBUG)"));
38 SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
39 : AMDGPUInstrInfo(ST), RI(), ST(ST) {}
41 //===----------------------------------------------------------------------===//
42 // TargetInstrInfo callbacks
43 //===----------------------------------------------------------------------===//
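// Returns the number of operands of Node, excluding any trailing glue operands.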
45 static unsigned getNumOperandsNoGlue(SDNode *Node) {
46 unsigned N = Node->getNumOperands();
47 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
52 static SDValue findChainOperand(SDNode *Load) {
53 SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
54 assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
58 /// \brief Returns true if both nodes have the same value for the given
59 /// operand \p Op, or if both nodes do not have this operand.
60 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
61 unsigned Opc0 = N0->getMachineOpcode();
62 unsigned Opc1 = N1->getMachineOpcode();
64 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
65 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);
67 if (Op0Idx == -1 && Op1Idx == -1)
71 if ((Op0Idx == -1 && Op1Idx != -1) ||
72 (Op1Idx == -1 && Op0Idx != -1))
75 // getNamedOperandIdx returns the index for the MachineInstr's operands,
76 // which includes the result as the first operand. We are indexing into the
77 // MachineSDNode's operands, so we need to skip the result operand to get
78 // the real operands.
82 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
85 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
86 AliasAnalysis *AA) const {
87 // TODO: The generic check fails for VALU instructions that should be
88 // rematerializable due to implicit reads of exec. We really want all of the
89 // generic logic for this, except for the restriction on the implicit exec read.
90 switch (MI.getOpcode()) {
91 case AMDGPU::V_MOV_B32_e32:
92 case AMDGPU::V_MOV_B32_e64:
93 case AMDGPU::V_MOV_B64_PSEUDO:
100 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
102 int64_t &Offset1) const {
103 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
106 unsigned Opc0 = Load0->getMachineOpcode();
107 unsigned Opc1 = Load1->getMachineOpcode();
109 // Make sure both are actually loads.
110 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
113 if (isDS(Opc0) && isDS(Opc1)) {
115 // FIXME: Handle this case:
116 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
120 if (Load0->getOperand(1) != Load1->getOperand(1))
124 if (findChainOperand(Load0) != findChainOperand(Load1))
127 // Skip read2 / write2 variants for simplicity.
128 // TODO: We should report true if the used offsets are adjacent (excluded
130 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
131 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
134 Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
135 Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
139 if (isSMRD(Opc0) && isSMRD(Opc1)) {
140 assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));
143 if (Load0->getOperand(0) != Load1->getOperand(0))
146 const ConstantSDNode *Load0Offset =
147 dyn_cast<ConstantSDNode>(Load0->getOperand(1));
148 const ConstantSDNode *Load1Offset =
149 dyn_cast<ConstantSDNode>(Load1->getOperand(1));
151 if (!Load0Offset || !Load1Offset)
155 if (findChainOperand(Load0) != findChainOperand(Load1))
158 Offset0 = Load0Offset->getZExtValue();
159 Offset1 = Load1Offset->getZExtValue();
163 // MUBUF and MTBUF can access the same addresses.
164 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {
166 // MUBUF and MTBUF have vaddr at different indices.
167 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
168 findChainOperand(Load0) != findChainOperand(Load1) ||
169 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
170 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
173 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
174 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
176 if (OffIdx0 == -1 || OffIdx1 == -1)
179 // getNamedOperandIdx returns the index for MachineInstrs. Since they
180 // include the output in the operand list, but SDNodes don't, we need to
181 // subtract one from the index.
185 SDValue Off0 = Load0->getOperand(OffIdx0);
186 SDValue Off1 = Load1->getOperand(OffIdx1);
188 // The offset might be a FrameIndexSDNode.
189 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
192 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
193 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
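// DS_READ2ST64 / DS_WRITE2ST64 scale offset0/offset1 by 64 elements rather than 1.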
200 static bool isStride64(unsigned Opc) {
202 case AMDGPU::DS_READ2ST64_B32:
203 case AMDGPU::DS_READ2ST64_B64:
204 case AMDGPU::DS_WRITE2ST64_B32:
205 case AMDGPU::DS_WRITE2ST64_B64:
212 bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
214 const TargetRegisterInfo *TRI) const {
215 unsigned Opc = LdSt.getOpcode();
218 const MachineOperand *OffsetImm =
219 getNamedOperand(LdSt, AMDGPU::OpName::offset);
221 // Normal, single offset LDS instruction.
222 const MachineOperand *AddrReg =
223 getNamedOperand(LdSt, AMDGPU::OpName::addr);
225 BaseReg = AddrReg->getReg();
226 Offset = OffsetImm->getImm();
230 // The 2 offset instructions use offset0 and offset1 instead. We can treat
231 // these as a load with a single offset if the 2 offsets are consecutive. We
232 // will use this for some partially aligned loads.
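// For example, a ds_read2_b32 with offset0 = 4 and offset1 = 5 is treated as a
// single 8-byte access at byte offset 16 (EltSize of 4 times offset0).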
233 const MachineOperand *Offset0Imm =
234 getNamedOperand(LdSt, AMDGPU::OpName::offset0);
235 const MachineOperand *Offset1Imm =
236 getNamedOperand(LdSt, AMDGPU::OpName::offset1);
238 uint8_t Offset0 = Offset0Imm->getImm();
239 uint8_t Offset1 = Offset1Imm->getImm();
241 if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
242 // Each of these offsets is in element-sized units, so we need to convert
243 // to bytes for the individual reads.
247 EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
249 assert(LdSt.mayStore());
250 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
251 EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
257 const MachineOperand *AddrReg =
258 getNamedOperand(LdSt, AMDGPU::OpName::addr);
259 BaseReg = AddrReg->getReg();
260 Offset = EltSize * Offset0;
267 if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
268 const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
269 if (SOffset && SOffset->isReg())
272 const MachineOperand *AddrReg =
273 getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
277 const MachineOperand *OffsetImm =
278 getNamedOperand(LdSt, AMDGPU::OpName::offset);
279 BaseReg = AddrReg->getReg();
280 Offset = OffsetImm->getImm();
282 if (SOffset) // soffset can be an inline immediate.
283 Offset += SOffset->getImm();
289 const MachineOperand *OffsetImm =
290 getNamedOperand(LdSt, AMDGPU::OpName::offset);
294 const MachineOperand *SBaseReg =
295 getNamedOperand(LdSt, AMDGPU::OpName::sbase);
296 BaseReg = SBaseReg->getReg();
297 Offset = OffsetImm->getImm();
302 const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
303 BaseReg = AddrReg->getReg();
311 bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
312 MachineInstr &SecondLdSt,
313 unsigned NumLoads) const {
314 const MachineOperand *FirstDst = nullptr;
315 const MachineOperand *SecondDst = nullptr;
317 if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
318 (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt))) {
319 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
320 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
321 } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
322 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
323 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
324 } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
325 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
326 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
329 if (!FirstDst || !SecondDst)
332 // Try to limit clustering based on the total number of bytes loaded
333 // rather than the number of instructions. This is done to help reduce
334 // register pressure. The method used is somewhat inexact, though,
335 // because it assumes that all loads in the cluster will load the
336 // same number of bytes as FirstLdSt.
338 // The unit of this value is bytes.
339 // FIXME: This needs finer tuning.
340 unsigned LoadClusterThreshold = 16;
342 const MachineRegisterInfo &MRI =
343 FirstLdSt.getParent()->getParent()->getRegInfo();
344 const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
346 return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
349 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
350 MachineBasicBlock::iterator MI,
351 const DebugLoc &DL, unsigned DestReg,
352 unsigned SrcReg, bool KillSrc) const {
353 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);
355 if (RC == &AMDGPU::VGPR_32RegClass) {
356 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
357 AMDGPU::SReg_32RegClass.contains(SrcReg));
358 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
359 .addReg(SrcReg, getKillRegState(KillSrc));
363 if (RC == &AMDGPU::SReg_32_XM0RegClass ||
364 RC == &AMDGPU::SReg_32RegClass) {
365 if (SrcReg == AMDGPU::SCC) {
366 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
372 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
373 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
374 .addReg(SrcReg, getKillRegState(KillSrc));
378 if (RC == &AMDGPU::SReg_64RegClass) {
379 if (DestReg == AMDGPU::VCC) {
380 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
381 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
382 .addReg(SrcReg, getKillRegState(KillSrc));
384 // FIXME: Hack until VReg_1 removed.
385 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
386 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
388 .addReg(SrcReg, getKillRegState(KillSrc));
394 assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
395 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
396 .addReg(SrcReg, getKillRegState(KillSrc));
400 if (DestReg == AMDGPU::SCC) {
401 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
402 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
403 .addReg(SrcReg, getKillRegState(KillSrc))
408 unsigned EltSize = 4;
409 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
410 if (RI.isSGPRClass(RC)) {
411 if (RC->getSize() > 4) {
412 Opcode = AMDGPU::S_MOV_B64;
415 Opcode = AMDGPU::S_MOV_B32;
420 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
421 bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
423 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
426 SubIdx = SubIndices[Idx];
428 SubIdx = SubIndices[SubIndices.size() - Idx - 1];
430 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
431 get(Opcode), RI.getSubReg(DestReg, SubIdx));
433 Builder.addReg(RI.getSubReg(SrcReg, SubIdx));
435 if (Idx == SubIndices.size() - 1)
436 Builder.addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
439 Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
441 Builder.addReg(SrcReg, RegState::Implicit);
445 int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
448 // Try to map original to commuted opcode
449 NewOpc = AMDGPU::getCommuteRev(Opcode);
451 // Check if the commuted (REV) opcode exists on the target.
452 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
454 // Try to map commuted to original opcode
455 NewOpc = AMDGPU::getCommuteOrig(Opcode);
457 // Check if the original (non-REV) opcode exists on the target.
458 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;
463 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
465 if (DstRC->getSize() == 4) {
466 return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
467 } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
468 return AMDGPU::S_MOV_B64;
469 } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
470 return AMDGPU::V_MOV_B64_PSEUDO;
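// Select the SGPR spill pseudo matching the spill size in bytes.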
475 static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
478 return AMDGPU::SI_SPILL_S32_SAVE;
480 return AMDGPU::SI_SPILL_S64_SAVE;
482 return AMDGPU::SI_SPILL_S128_SAVE;
484 return AMDGPU::SI_SPILL_S256_SAVE;
486 return AMDGPU::SI_SPILL_S512_SAVE;
488 llvm_unreachable("unknown register size");
492 static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
495 return AMDGPU::SI_SPILL_V32_SAVE;
497 return AMDGPU::SI_SPILL_V64_SAVE;
499 return AMDGPU::SI_SPILL_V96_SAVE;
501 return AMDGPU::SI_SPILL_V128_SAVE;
503 return AMDGPU::SI_SPILL_V256_SAVE;
505 return AMDGPU::SI_SPILL_V512_SAVE;
507 llvm_unreachable("unknown register size");
511 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
512 MachineBasicBlock::iterator MI,
513 unsigned SrcReg, bool isKill,
515 const TargetRegisterClass *RC,
516 const TargetRegisterInfo *TRI) const {
517 MachineFunction *MF = MBB.getParent();
518 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
519 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
520 DebugLoc DL = MBB.findDebugLoc(MI);
522 unsigned Size = FrameInfo.getObjectSize(FrameIndex);
523 unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
524 MachinePointerInfo PtrInfo
525 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
526 MachineMemOperand *MMO
527 = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
530 if (RI.isSGPRClass(RC)) {
531 MFI->setHasSpilledSGPRs();
533 // We are only allowed to create one new instruction when spilling
534 // registers, so we need to use a pseudo instruction for spilling SGPRs.
535 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));
537 // The SGPR spill/restore instructions only work on numbered SGPRs, so we need
538 // to make sure we are using the correct register class.
539 if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
540 MachineRegisterInfo &MRI = MF->getRegInfo();
541 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
544 MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
545 .addReg(SrcReg, getKillRegState(isKill)) // data
546 .addFrameIndex(FrameIndex) // addr
548 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
549 .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
550 // Add the scratch resource registers as implicit uses because we may end up
551 // needing them, and need to ensure that the reserved registers are
552 // correctly handled.
554 if (ST.hasScalarStores()) {
555 // m0 is used as the offset register for scalar stores when spilling.
556 Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
562 if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
563 LLVMContext &Ctx = MF->getFunction()->getContext();
564 Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
566 BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
572 assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
574 unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
575 MFI->setHasSpilledVGPRs();
576 BuildMI(MBB, MI, DL, get(Opcode))
577 .addReg(SrcReg, getKillRegState(isKill)) // data
578 .addFrameIndex(FrameIndex) // addr
579 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
580 .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
585 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
588 return AMDGPU::SI_SPILL_S32_RESTORE;
590 return AMDGPU::SI_SPILL_S64_RESTORE;
592 return AMDGPU::SI_SPILL_S128_RESTORE;
594 return AMDGPU::SI_SPILL_S256_RESTORE;
596 return AMDGPU::SI_SPILL_S512_RESTORE;
598 llvm_unreachable("unknown register size");
602 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
605 return AMDGPU::SI_SPILL_V32_RESTORE;
607 return AMDGPU::SI_SPILL_V64_RESTORE;
609 return AMDGPU::SI_SPILL_V96_RESTORE;
611 return AMDGPU::SI_SPILL_V128_RESTORE;
613 return AMDGPU::SI_SPILL_V256_RESTORE;
615 return AMDGPU::SI_SPILL_V512_RESTORE;
617 llvm_unreachable("unknown register size");
621 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
622 MachineBasicBlock::iterator MI,
623 unsigned DestReg, int FrameIndex,
624 const TargetRegisterClass *RC,
625 const TargetRegisterInfo *TRI) const {
626 MachineFunction *MF = MBB.getParent();
627 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
628 MachineFrameInfo &FrameInfo = MF->getFrameInfo();
629 DebugLoc DL = MBB.findDebugLoc(MI);
630 unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
631 unsigned Size = FrameInfo.getObjectSize(FrameIndex);
633 MachinePointerInfo PtrInfo
634 = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
636 MachineMemOperand *MMO = MF->getMachineMemOperand(
637 PtrInfo, MachineMemOperand::MOLoad, Size, Align);
639 if (RI.isSGPRClass(RC)) {
640 // FIXME: Maybe this should not include a memoperand because it will be
641 // lowered to non-memory instructions.
642 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
643 if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
644 MachineRegisterInfo &MRI = MF->getRegInfo();
645 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
648 MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
649 .addFrameIndex(FrameIndex) // addr
651 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
652 .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
654 if (ST.hasScalarStores()) {
655 // m0 is used as the offset register for scalar stores when spilling.
656 Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
662 if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
663 LLVMContext &Ctx = MF->getFunction()->getContext();
664 Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
665 " restore register");
666 BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
671 assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
673 unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
674 BuildMI(MBB, MI, DL, get(Opcode), DestReg)
675 .addFrameIndex(FrameIndex) // vaddr
676 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
677 .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
682 /// \param FrameOffset Offset in bytes of the FrameIndex being spilled
683 unsigned SIInstrInfo::calculateLDSSpillAddress(
684 MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
685 unsigned FrameOffset, unsigned Size) const {
686 MachineFunction *MF = MBB.getParent();
687 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
688 const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
689 const SIRegisterInfo *TRI = ST.getRegisterInfo();
690 DebugLoc DL = MBB.findDebugLoc(MI);
691 unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
692 unsigned WavefrontSize = ST.getWavefrontSize();
694 unsigned TIDReg = MFI->getTIDReg();
695 if (!MFI->hasCalculatedTID()) {
696 MachineBasicBlock &Entry = MBB.getParent()->front();
697 MachineBasicBlock::iterator Insert = Entry.front();
698 DebugLoc DL = Insert->getDebugLoc();
700 TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
702 if (TIDReg == AMDGPU::NoRegister)
705 if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
706 WorkGroupSize > WavefrontSize) {
709 = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
711 = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
713 = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
714 unsigned InputPtrReg =
715 TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
716 for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
717 if (!Entry.isLiveIn(Reg))
718 Entry.addLiveIn(Reg);
721 RS->enterBasicBlock(Entry);
722 // FIXME: Can we scavenge an SReg_64 and access the subregs?
723 unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
724 unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
725 BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
727 .addImm(SI::KernelInputOffsets::NGROUPS_Z);
728 BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
730 .addImm(SI::KernelInputOffsets::NGROUPS_Y);
732 // NGROUPS.X * NGROUPS.Y
733 BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
736 // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
737 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
740 // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
741 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
745 // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
746 BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
751 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
756 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
762 BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
766 MFI->setTIDReg(TIDReg);
769 // Add FrameIndex to LDS offset
770 unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
771 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
778 void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
779 MachineBasicBlock::iterator MI,
781 DebugLoc DL = MBB.findDebugLoc(MI);
789 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
794 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
795 MachineBasicBlock::iterator MI) const {
796 insertWaitStates(MBB, MI, 1);
799 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
800 switch (MI.getOpcode()) {
801 default: return 1; // FIXME: Do wait states equal cycles?
804 return MI.getOperand(0).getImm() + 1;
808 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
809 MachineBasicBlock &MBB = *MI.getParent();
810 DebugLoc DL = MBB.findDebugLoc(MI);
811 switch (MI.getOpcode()) {
812 default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
813 case AMDGPU::S_MOV_B64_term: {
814 // This is only a terminator to get the correct spill code placement during
815 // register allocation.
816 MI.setDesc(get(AMDGPU::S_MOV_B64));
819 case AMDGPU::S_XOR_B64_term: {
820 // This is only a terminator to get the correct spill code placement during
821 // register allocation.
822 MI.setDesc(get(AMDGPU::S_XOR_B64));
825 case AMDGPU::S_ANDN2_B64_term: {
826 // This is only a terminator to get the correct spill code placement during
827 // register allocation.
828 MI.setDesc(get(AMDGPU::S_ANDN2_B64));
831 case AMDGPU::V_MOV_B64_PSEUDO: {
832 unsigned Dst = MI.getOperand(0).getReg();
833 unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
834 unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
836 const MachineOperand &SrcOp = MI.getOperand(1);
837 // FIXME: Will this work for 64-bit floating point immediates?
838 assert(!SrcOp.isFPImm());
840 APInt Imm(64, SrcOp.getImm());
841 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
842 .addImm(Imm.getLoBits(32).getZExtValue())
843 .addReg(Dst, RegState::Implicit | RegState::Define);
844 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
845 .addImm(Imm.getHiBits(32).getZExtValue())
846 .addReg(Dst, RegState::Implicit | RegState::Define);
848 assert(SrcOp.isReg());
849 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
850 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
851 .addReg(Dst, RegState::Implicit | RegState::Define);
852 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
853 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
854 .addReg(Dst, RegState::Implicit | RegState::Define);
856 MI.eraseFromParent();
859 case AMDGPU::V_MOVRELD_B32_V1:
860 case AMDGPU::V_MOVRELD_B32_V2:
861 case AMDGPU::V_MOVRELD_B32_V4:
862 case AMDGPU::V_MOVRELD_B32_V8:
863 case AMDGPU::V_MOVRELD_B32_V16: {
864 const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
865 unsigned VecReg = MI.getOperand(0).getReg();
866 bool IsUndef = MI.getOperand(1).isUndef();
867 unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
868 assert(VecReg == MI.getOperand(1).getReg());
870 MachineInstr *MovRel =
871 BuildMI(MBB, MI, DL, MovRelDesc)
872 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
873 .addOperand(MI.getOperand(2))
874 .addReg(VecReg, RegState::ImplicitDefine)
875 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
877 const int ImpDefIdx =
878 MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
879 const int ImpUseIdx = ImpDefIdx + 1;
880 MovRel->tieOperands(ImpDefIdx, ImpUseIdx);
882 MI.eraseFromParent();
885 case AMDGPU::SI_PC_ADD_REL_OFFSET: {
886 MachineFunction &MF = *MBB.getParent();
887 unsigned Reg = MI.getOperand(0).getReg();
888 unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
889 unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
891 // Create a bundle so these instructions won't be re-ordered by the
892 // post-RA scheduler.
893 MIBundleBuilder Bundler(MBB, MI);
894 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
896 // Add 32-bit offset from this instruction to the start of the
898 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
900 .addOperand(MI.getOperand(1)));
902 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
904 if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
907 MIB.addOperand(MI.getOperand(2));
910 llvm::finalizeBundle(MBB, Bundler.begin());
912 MI.eraseFromParent();
919 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
920 MachineOperand &Src0,
922 MachineOperand &Src1,
923 unsigned Src1OpName) const {
924 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
928 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
930 "All commutable instructions have both src0 and src1 modifiers");
932 int Src0ModsVal = Src0Mods->getImm();
933 int Src1ModsVal = Src1Mods->getImm();
935 Src1Mods->setImm(Src0ModsVal);
936 Src0Mods->setImm(Src1ModsVal);
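// Exchange a register operand with an immediate or frame-index operand, rewriting
// both in place and preserving the register flags (kill, dead, undef, debug).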
940 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
941 MachineOperand &RegOp,
942 MachineOperand &NonRegOp) {
943 unsigned Reg = RegOp.getReg();
944 unsigned SubReg = RegOp.getSubReg();
945 bool IsKill = RegOp.isKill();
946 bool IsDead = RegOp.isDead();
947 bool IsUndef = RegOp.isUndef();
948 bool IsDebug = RegOp.isDebug();
950 if (NonRegOp.isImm())
951 RegOp.ChangeToImmediate(NonRegOp.getImm());
952 else if (NonRegOp.isFI())
953 RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
957 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
958 NonRegOp.setSubReg(SubReg);
963 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
965 unsigned Src1Idx) const {
966 assert(!NewMI && "this should never be used");
968 unsigned Opc = MI.getOpcode();
969 int CommutedOpcode = commuteOpcode(Opc);
970 if (CommutedOpcode == -1)
973 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
974 static_cast<int>(Src0Idx) &&
975 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
976 static_cast<int>(Src1Idx) &&
977 "inconsistency with findCommutedOpIndices");
979 MachineOperand &Src0 = MI.getOperand(Src0Idx);
980 MachineOperand &Src1 = MI.getOperand(Src1Idx);
982 MachineInstr *CommutedMI = nullptr;
983 if (Src0.isReg() && Src1.isReg()) {
984 if (isOperandLegal(MI, Src1Idx, &Src0)) {
985 // Be sure to copy the source modifiers to the right place.
987 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
990 } else if (Src0.isReg() && !Src1.isReg()) {
991 // src0 should always be able to support any operand type, so no need to
992 // check operand legality.
993 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
994 } else if (!Src0.isReg() && Src1.isReg()) {
995 if (isOperandLegal(MI, Src1Idx, &Src0))
996 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
998 // FIXME: Found two non-register operands to commute. This does happen.
1004 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
1005 Src1, AMDGPU::OpName::src1_modifiers);
1007 CommutedMI->setDesc(get(CommutedOpcode));
1013 // This needs to be implemented because the source modifiers may be inserted
1014 // between the true commutable operands, and the base
1015 // TargetInstrInfo::commuteInstruction uses it.
1016 bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
1017 unsigned &SrcOpIdx1) const {
1018 if (!MI.isCommutable())
1021 unsigned Opc = MI.getOpcode();
1022 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1026 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1030 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
1033 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
1034 int64_t BrOffset) const {
1035 // BranchRelaxation should never have to check s_setpc_b64 because its dest
1036 // block is unanalyzable.
1037 assert(BranchOp != AMDGPU::S_SETPC_B64);
1039 // Convert to dwords.
1042 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
1043 // from the next instruction.
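// For example, with the default 16-bit field the reachable offset is roughly
// +/-32768 dwords (about +/-128 KiB) from the next instruction.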
1046 return isIntN(BranchOffsetBits, BrOffset);
1049 MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
1050 const MachineInstr &MI) const {
1051 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
1052 // Determining the destination would be a difficult analysis, but the branch is
1053 // always legal, so there's no need to analyze it.
1057 return MI.getOperand(0).getMBB();
1060 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
1061 MachineBasicBlock &DestBB,
1064 RegScavenger *RS) const {
1065 assert(RS && "RegScavenger required for long branching");
1066 assert(MBB.empty() &&
1067 "new block should be inserted for expanding unconditional branch");
1068 assert(MBB.pred_size() == 1);
1070 MachineFunction *MF = MBB.getParent();
1071 MachineRegisterInfo &MRI = MF->getRegInfo();
1073 // FIXME: Virtual register workaround for RegScavenger not working with empty
1074 // blocks.
1075 unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1079 // We need to compute the offset relative to the instruction immediately after
1080 // s_getpc_b64. Insert pc arithmetic code before last terminator.
1081 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);
1083 // TODO: Handle > 32-bit block address.
1084 if (BrOffset >= 0) {
1085 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
1086 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1087 .addReg(PCReg, 0, AMDGPU::sub0)
1088 .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
1089 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
1090 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1091 .addReg(PCReg, 0, AMDGPU::sub1)
1094 // Backwards branch.
1095 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
1096 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1097 .addReg(PCReg, 0, AMDGPU::sub0)
1098 .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
1099 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
1100 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1101 .addReg(PCReg, 0, AMDGPU::sub1)
1105 // Insert the indirect branch after the other terminator.
1106 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
1109 // FIXME: If spilling is necessary, this will fail because this scavenger has
1110 // no emergency stack slots. It is non-trivial to spill in this situation,
1111 // because the restore code needs to be specially placed after the
1112 // jump. BranchRelaxation then needs to be made aware of the newly inserted
1115 // If a spill is needed for the pc register pair, we need to insert a spill
1116 // restore block right before the destination block, and insert a short branch
1117 // into the old destination block's fallthrough predecessor.
1120 // s_cbranch_scc0 skip_long_branch:
1124 // s_getpc_b64 s[8:9]
1125 // s_add_u32 s8, s8, restore_bb
1126 // s_addc_u32 s9, s9, 0
1127 // s_setpc_b64 s[8:9]
1129 // skip_long_branch:
1134 // dest_bb_fallthrough_predecessor:
1140 // fallthrough dest_bb
1145 RS->enterBasicBlockEnd(MBB);
1146 unsigned Scav = RS->scavengeRegister(&AMDGPU::SReg_64RegClass,
1147 MachineBasicBlock::iterator(GetPC), 0);
1148 MRI.replaceRegWith(PCReg, Scav);
1149 MRI.clearVirtRegs();
1150 RS->setRegUsed(Scav);
1152 return 4 + 8 + 4 + 4;
1155 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
1157 case SIInstrInfo::SCC_TRUE:
1158 return AMDGPU::S_CBRANCH_SCC1;
1159 case SIInstrInfo::SCC_FALSE:
1160 return AMDGPU::S_CBRANCH_SCC0;
1161 case SIInstrInfo::VCCNZ:
1162 return AMDGPU::S_CBRANCH_VCCNZ;
1163 case SIInstrInfo::VCCZ:
1164 return AMDGPU::S_CBRANCH_VCCZ;
1165 case SIInstrInfo::EXECNZ:
1166 return AMDGPU::S_CBRANCH_EXECNZ;
1167 case SIInstrInfo::EXECZ:
1168 return AMDGPU::S_CBRANCH_EXECZ;
1170 llvm_unreachable("invalid branch predicate");
1174 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
1176 case AMDGPU::S_CBRANCH_SCC0:
1178 case AMDGPU::S_CBRANCH_SCC1:
1180 case AMDGPU::S_CBRANCH_VCCNZ:
1182 case AMDGPU::S_CBRANCH_VCCZ:
1184 case AMDGPU::S_CBRANCH_EXECNZ:
1186 case AMDGPU::S_CBRANCH_EXECZ:
1193 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
1194 MachineBasicBlock::iterator I,
1195 MachineBasicBlock *&TBB,
1196 MachineBasicBlock *&FBB,
1197 SmallVectorImpl<MachineOperand> &Cond,
1198 bool AllowModify) const {
1199 if (I->getOpcode() == AMDGPU::S_BRANCH) {
1200 // Unconditional Branch
1201 TBB = I->getOperand(0).getMBB();
1205 BranchPredicate Pred = getBranchPredicate(I->getOpcode());
1206 if (Pred == INVALID_BR)
1209 MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
1210 Cond.push_back(MachineOperand::CreateImm(Pred));
1211 Cond.push_back(I->getOperand(1)); // Save the branch register.
1215 if (I == MBB.end()) {
1216 // Conditional branch followed by fall-through.
1221 if (I->getOpcode() == AMDGPU::S_BRANCH) {
1223 FBB = I->getOperand(0).getMBB();
1230 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
1231 MachineBasicBlock *&FBB,
1232 SmallVectorImpl<MachineOperand> &Cond,
1233 bool AllowModify) const {
1234 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1238 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
1239 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
1243 // TODO: Should be able to treat as fallthrough?
1247 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
1250 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();
1252 // Specifically handle the case where the conditional branch is to the same
1253 // destination as the mask branch. e.g.
1255 // si_mask_branch BB8
1256 // s_cbranch_execz BB8
1259 // This is required to understand divergent loops which may need the branches
1261 if (TBB != MaskBrDest || Cond.empty())
1264 auto Pred = Cond[0].getImm();
1265 return (Pred != EXECZ && Pred != EXECNZ);
1268 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
1269 int *BytesRemoved) const {
1270 MachineBasicBlock::iterator I = MBB.getFirstTerminator();
1273 unsigned RemovedSize = 0;
1274 while (I != MBB.end()) {
1275 MachineBasicBlock::iterator Next = std::next(I);
1276 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
1281 RemovedSize += getInstSizeInBytes(*I);
1282 I->eraseFromParent();
1288 *BytesRemoved = RemovedSize;
1293 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
1294 MachineBasicBlock *TBB,
1295 MachineBasicBlock *FBB,
1296 ArrayRef<MachineOperand> Cond,
1298 int *BytesAdded) const {
1300 if (!FBB && Cond.empty()) {
1301 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1308 assert(TBB && Cond[0].isImm());
1311 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
1315 MachineInstr *CondBr =
1316 BuildMI(&MBB, DL, get(Opcode))
1319 // Copy the flags onto the implicit condition register operand.
1320 MachineOperand &CondReg = CondBr->getOperand(1);
1321 CondReg.setIsUndef(Cond[1].isUndef());
1322 CondReg.setIsKill(Cond[1].isKill());
1331 MachineInstr *CondBr =
1332 BuildMI(&MBB, DL, get(Opcode))
1334 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
1337 MachineOperand &CondReg = CondBr->getOperand(1);
1338 CondReg.setIsUndef(Cond[1].isUndef());
1339 CondReg.setIsKill(Cond[1].isKill());
1347 bool SIInstrInfo::reverseBranchCondition(
1348 SmallVectorImpl<MachineOperand> &Cond) const {
1349 assert(Cond.size() == 2);
1350 Cond[0].setImm(-Cond[0].getImm());
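// Strip the src0/src1/src2 modifier operands from MI; used when rewriting
// MAD/MAC into the MADMK/MADAK forms, which have no modifier operands.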
1354 static void removeModOperands(MachineInstr &MI) {
1355 unsigned Opc = MI.getOpcode();
1356 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1357 AMDGPU::OpName::src0_modifiers);
1358 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1359 AMDGPU::OpName::src1_modifiers);
1360 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
1361 AMDGPU::OpName::src2_modifiers);
1363 MI.RemoveOperand(Src2ModIdx);
1364 MI.RemoveOperand(Src1ModIdx);
1365 MI.RemoveOperand(Src0ModIdx);
1368 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
1369 unsigned Reg, MachineRegisterInfo *MRI) const {
1370 if (!MRI->hasOneNonDBGUse(Reg))
1373 unsigned Opc = UseMI.getOpcode();
1374 if (Opc == AMDGPU::COPY) {
1375 bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
1376 switch (DefMI.getOpcode()) {
1379 case AMDGPU::S_MOV_B64:
1380 // TODO: We could fold 64-bit immediates, but this gets complicated
1381 // when there are sub-registers.
1384 case AMDGPU::V_MOV_B32_e32:
1385 case AMDGPU::S_MOV_B32:
1388 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1389 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
1391 // FIXME: We could handle FrameIndex values here.
1392 if (!ImmOp->isImm()) {
1395 UseMI.setDesc(get(NewOpc));
1396 UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
1397 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
1401 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
1402 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) {
1403 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
1405 // Don't fold if we are using source modifiers. The new VOP2 instructions
1407 if (hasModifiersSet(UseMI, AMDGPU::OpName::src0_modifiers) ||
1408 hasModifiersSet(UseMI, AMDGPU::OpName::src1_modifiers) ||
1409 hasModifiersSet(UseMI, AMDGPU::OpName::src2_modifiers)) {
1413 const MachineOperand &ImmOp = DefMI.getOperand(1);
1415 // If this is a free constant, there's no reason to do this.
1416 // TODO: We could fold this here instead of letting SIFoldOperands do it
1418 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
1420 // Any src operand can be used for the legality check.
1421 if (isInlineConstant(UseMI, *Src0, ImmOp))
1424 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
1425 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
1427 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
1428 // We should only expect these to be on src0 due to canonicalizations.
1429 if (Src0->isReg() && Src0->getReg() == Reg) {
1430 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
1433 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
1436 // We need to swap operands 0 and 1 since madmk constant is at operand 1.
1438 const int64_t Imm = DefMI.getOperand(1).getImm();
1440 // FIXME: This would be a lot easier if we could return a new instruction
1441 // instead of having to modify in place.
1443 // Remove these first since they are at the end.
1444 UseMI.RemoveOperand(
1445 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1446 UseMI.RemoveOperand(
1447 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
1449 unsigned Src1Reg = Src1->getReg();
1450 unsigned Src1SubReg = Src1->getSubReg();
1451 Src0->setReg(Src1Reg);
1452 Src0->setSubReg(Src1SubReg);
1453 Src0->setIsKill(Src1->isKill());
1455 if (Opc == AMDGPU::V_MAC_F32_e64 ||
1456 Opc == AMDGPU::V_MAC_F16_e64)
1457 UseMI.untieRegOperand(
1458 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
1460 Src1->ChangeToImmediate(Imm);
1462 removeModOperands(UseMI);
1463 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16));
1465 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1467 DefMI.eraseFromParent();
1472 // Added part is the constant: Use v_madak_{f16, f32}.
1473 if (Src2->isReg() && Src2->getReg() == Reg) {
1474 // Not allowed to use constant bus for another operand.
1475 // We can however allow an inline immediate as src0.
1476 if (!Src0->isImm() &&
1477 (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
1480 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
1483 const int64_t Imm = DefMI.getOperand(1).getImm();
1485 // FIXME: This would be a lot easier if we could return a new instruction
1486 // instead of having to modify in place.
1488 // Remove these first since they are at the end.
1489 UseMI.RemoveOperand(
1490 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
1491 UseMI.RemoveOperand(
1492 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
1494 if (Opc == AMDGPU::V_MAC_F32_e64 ||
1495 Opc == AMDGPU::V_MAC_F16_e64)
1496 UseMI.untieRegOperand(
1497 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
1499 // ChangeToImmediate adds Src2 back to the instruction.
1500 Src2->ChangeToImmediate(Imm);
1502 // These come before src2.
1503 removeModOperands(UseMI);
1504 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16));
1506 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
1508 DefMI.eraseFromParent();
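// Returns true if the byte ranges [OffsetA, OffsetA + WidthA) and
// [OffsetB, OffsetB + WidthB) do not overlap.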
1517 static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
1518 int WidthB, int OffsetB) {
1519 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
1520 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
1521 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
1522 return LowOffset + LowWidth <= HighOffset;
1525 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
1526 MachineInstr &MIb) const {
1527 unsigned BaseReg0, BaseReg1;
1528 int64_t Offset0, Offset1;
1530 if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
1531 getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
1533 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
1534 // FIXME: Handle ds_read2 / ds_write2.
1537 unsigned Width0 = (*MIa.memoperands_begin())->getSize();
1538 unsigned Width1 = (*MIb.memoperands_begin())->getSize();
1539 if (BaseReg0 == BaseReg1 &&
1540 offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
1548 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
1550 AliasAnalysis *AA) const {
1551 assert((MIa.mayLoad() || MIa.mayStore()) &&
1552 "MIa must load from or modify a memory location");
1553 assert((MIb.mayLoad() || MIb.mayStore()) &&
1554 "MIb must load from or modify a memory location");
1556 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
1559 // XXX - Can we relax this between address spaces?
1560 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1563 if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) {
1564 const MachineMemOperand *MMOa = *MIa.memoperands_begin();
1565 const MachineMemOperand *MMOb = *MIb.memoperands_begin();
1566 if (MMOa->getValue() && MMOb->getValue()) {
1567 MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo());
1568 MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo());
1569 if (!AA->alias(LocA, LocB))
1574 // TODO: Should we check the address space from the MachineMemOperand? That
1575 // would allow us to distinguish objects we know don't alias based on the
1576 // underlying address space, even if it was lowered to a different one,
1577 // e.g. private accesses lowered to use MUBUF instructions on a scratch
1581 return checkInstOffsetsDoNotOverlap(MIa, MIb);
1583 return !isFLAT(MIb);
1586 if (isMUBUF(MIa) || isMTBUF(MIa)) {
1587 if (isMUBUF(MIb) || isMTBUF(MIb))
1588 return checkInstOffsetsDoNotOverlap(MIa, MIb);
1590 return !isFLAT(MIb) && !isSMRD(MIb);
1595 return checkInstOffsetsDoNotOverlap(MIa, MIb);
1597 return !isFLAT(MIb) && !isMUBUF(MIa) && !isMTBUF(MIa);
1602 return checkInstOffsetsDoNotOverlap(MIa, MIb);
1610 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
1612 LiveVariables *LV) const {
1615 switch (MI.getOpcode()) {
1618 case AMDGPU::V_MAC_F16_e64:
1620 case AMDGPU::V_MAC_F32_e64:
1622 case AMDGPU::V_MAC_F16_e32:
1624 case AMDGPU::V_MAC_F32_e32: {
1625 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
1626 AMDGPU::OpName::src0);
1627 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
1628 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
1634 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
1635 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
1636 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
1637 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
1639 return BuildMI(*MBB, MI, MI.getDebugLoc(),
1640 get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32))
1642 .addImm(0) // Src0 mods
1644 .addImm(0) // Src1 mods
1646 .addImm(0) // Src2 mods
1652 // It's not generally safe to move VALU instructions across these since it will
1653 // start using the register as a base index rather than directly.
1654 // XXX - Why isn't hasSideEffects sufficient for these?
1655 static bool changesVGPRIndexingMode(const MachineInstr &MI) {
1656 switch (MI.getOpcode()) {
1657 case AMDGPU::S_SET_GPR_IDX_ON:
1658 case AMDGPU::S_SET_GPR_IDX_MODE:
1659 case AMDGPU::S_SET_GPR_IDX_OFF:
1666 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1667 const MachineBasicBlock *MBB,
1668 const MachineFunction &MF) const {
1669 // XXX - Do we want the SP check in the base implementation?
1671 // Target-independent instructions do not have an implicit-use of EXEC, even
1672 // when they operate on VGPRs. Treating EXEC modifications as scheduling
1673 // boundaries prevents incorrect movements of such instructions.
1674 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) ||
1675 MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
1676 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
1677 MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
1678 changesVGPRIndexingMode(MI);
1681 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
1682 switch (Imm.getBitWidth()) {
1684 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
1685 ST.hasInv2PiInlineImm());
1687 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
1688 ST.hasInv2PiInlineImm());
1690 return AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
1691 ST.hasInv2PiInlineImm());
1693 llvm_unreachable("invalid bitwidth");
1697 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
1698 uint8_t OperandType) const {
1699 if (!MO.isImm() || OperandType < MCOI::OPERAND_FIRST_TARGET)
1702 // MachineOperand provides no way to tell the true operand size, since it only
1703 // records a 64-bit value. We need to know the size to determine if a 32-bit
1704 // floating point immediate bit pattern is legal for an integer immediate. It
1705 // would be for any 32-bit integer operand, but would not be for a 64-bit one.
1707 int64_t Imm = MO.getImm();
1708 switch (operandBitWidth(OperandType)) {
1710 int32_t Trunc = static_cast<int32_t>(Imm);
1711 return Trunc == Imm &&
1712 AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
1715 return AMDGPU::isInlinableLiteral64(MO.getImm(),
1716 ST.hasInv2PiInlineImm());
1719 if (isInt<16>(Imm) || isUInt<16>(Imm)) {
1720 int16_t Trunc = static_cast<int16_t>(Imm);
1721 return AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
1727 llvm_unreachable("invalid bitwidth");
1731 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
1732 const MCOperandInfo &OpInfo) const {
1733 switch (MO.getType()) {
1734 case MachineOperand::MO_Register:
1736 case MachineOperand::MO_Immediate:
1737 return !isInlineConstant(MO, OpInfo);
1738 case MachineOperand::MO_FrameIndex:
1739 case MachineOperand::MO_MachineBasicBlock:
1740 case MachineOperand::MO_ExternalSymbol:
1741 case MachineOperand::MO_GlobalAddress:
1742 case MachineOperand::MO_MCSymbol:
1745 llvm_unreachable("unexpected operand type");
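// Returns true when both operands are the same kind and hold the same register
// or the same immediate value.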
1749 static bool compareMachineOp(const MachineOperand &Op0,
1750 const MachineOperand &Op1) {
1751 if (Op0.getType() != Op1.getType())
1754 switch (Op0.getType()) {
1755 case MachineOperand::MO_Register:
1756 return Op0.getReg() == Op1.getReg();
1757 case MachineOperand::MO_Immediate:
1758 return Op0.getImm() == Op1.getImm();
1760 llvm_unreachable("Didn't expect to be comparing these operand types");
1764 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
1765 const MachineOperand &MO) const {
1766 const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo];
1768 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
1770 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
1773 if (OpInfo.RegClass < 0)
1776 if (MO.isImm() && isInlineConstant(MO, OpInfo))
1777 return RI.opCanUseInlineConstant(OpInfo.OperandType);
1779 return RI.opCanUseLiteralConstant(OpInfo.OperandType);
1782 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
1783 int Op32 = AMDGPU::getVOPe32(Opcode);
1787 return pseudoToMCOpcode(Op32) != -1;
1790 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
1791 // The src0_modifiers operand is present on all instructions
1792 // that have modifiers.
1794 return AMDGPU::getNamedOperandIdx(Opcode,
1795 AMDGPU::OpName::src0_modifiers) != -1;
1798 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
1799 unsigned OpName) const {
1800 const MachineOperand *Mods = getNamedOperand(MI, OpName);
1801 return Mods && Mods->getImm();
1804 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
1805 const MachineOperand &MO,
1806 const MCOperandInfo &OpInfo) const {
1807 // Literal constants use the constant bus.
1808 //if (isLiteralConstantLike(MO, OpInfo))
1811 return !isInlineConstant(MO, OpInfo);
1814 return true; // Misc other operands like FrameIndex
1819 if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
1820 return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
1822 // FLAT_SCR is just an SGPR pair.
1823 if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
1826 // EXEC register uses the constant bus.
1827 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
1830 // SGPRs use the constant bus
1831 return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 ||
1832 (!MO.isImplicit() &&
1833 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
1834 AMDGPU::SGPR_64RegClass.contains(MO.getReg()))));
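// Returns the first implicitly read SGPR (such as VCC, M0 or FLAT_SCR), or
// AMDGPU::NoRegister if the instruction reads none.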
1837 static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
1838 for (const MachineOperand &MO : MI.implicit_operands()) {
1839 // We only care about reads.
1843 switch (MO.getReg()) {
1846 case AMDGPU::FLAT_SCR:
1854 return AMDGPU::NoRegister;
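// VALU instructions generally read EXEC implicitly; readlane/writelane and
// purely scalar (SALU/SMRD) or generic opcodes are the exceptions.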
1857 static bool shouldReadExec(const MachineInstr &MI) {
1858 if (SIInstrInfo::isVALU(MI)) {
1859 switch (MI.getOpcode()) {
1860 case AMDGPU::V_READLANE_B32:
1861 case AMDGPU::V_READLANE_B32_si:
1862 case AMDGPU::V_READLANE_B32_vi:
1863 case AMDGPU::V_WRITELANE_B32:
1864 case AMDGPU::V_WRITELANE_B32_si:
1865 case AMDGPU::V_WRITELANE_B32_vi:
1872 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
1873 SIInstrInfo::isSALU(MI) ||
1874 SIInstrInfo::isSMRD(MI))
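// Check whether SubReg refers to a sub-register of SuperVec, either as a
// physical sub-register or as a virtual register use with a subreg index.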
1880 static bool isSubRegOf(const SIRegisterInfo &TRI,
1881 const MachineOperand &SuperVec,
1882 const MachineOperand &SubReg) {
1883 if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg()))
1884 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());
1886 return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
1887 SubReg.getReg() == SuperVec.getReg();
1890 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
1891 StringRef &ErrInfo) const {
1892 uint16_t Opcode = MI.getOpcode();
1893 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
1894 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
1895 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
1896 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
1898 // Make sure the number of operands is correct.
1899 const MCInstrDesc &Desc = get(Opcode);
1900 if (!Desc.isVariadic() &&
1901 Desc.getNumOperands() != MI.getNumExplicitOperands()) {
1902 ErrInfo = "Instruction has wrong number of operands.";
1906 if (MI.isInlineAsm()) {
1907 // Verify register classes for inlineasm constraints.
1908 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
1910 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
1914 const MachineOperand &Op = MI.getOperand(I);
1918 unsigned Reg = Op.getReg();
1919 if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) {
1920 ErrInfo = "inlineasm operand has incorrect register class.";
1928 // Make sure the register classes are correct.
1929 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
1930 if (MI.getOperand(i).isFPImm()) {
1931 ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
1932 "all fp values to integers.";
1936 int RegClass = Desc.OpInfo[i].RegClass;
1938 switch (Desc.OpInfo[i].OperandType) {
1939 case MCOI::OPERAND_REGISTER:
1940 if (MI.getOperand(i).isImm()) {
1941 ErrInfo = "Illegal immediate value for operand.";
1945 case AMDGPU::OPERAND_REG_IMM_INT32:
1946 case AMDGPU::OPERAND_REG_IMM_FP32:
1948 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1949 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1950 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1951 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1952 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1953 case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
1954 const MachineOperand &MO = MI.getOperand(i);
1955 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
1956 ErrInfo = "Illegal immediate value for operand.";
1961 case MCOI::OPERAND_IMMEDIATE:
1962 case AMDGPU::OPERAND_KIMM32:
1963 // Check if this operand is an immediate.
1964 // FrameIndex operands will be replaced by immediates, so they are
1966 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
1967 ErrInfo = "Expected immediate, but got non-immediate";
1975 if (!MI.getOperand(i).isReg())
1978 if (RegClass != -1) {
1979 unsigned Reg = MI.getOperand(i).getReg();
1980 if (Reg == AMDGPU::NoRegister ||
1981 TargetRegisterInfo::isVirtualRegister(Reg))
1984 const TargetRegisterClass *RC = RI.getRegClass(RegClass);
1985 if (!RC->contains(Reg)) {
1986 ErrInfo = "Operand has incorrect register class.";
1993 if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) {
1994 // Only look at the true operands. Only a real operand can use the constant
1995 // bus, and we don't want to check pseudo-operands like the source modifier
1997 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
1999 unsigned ConstantBusCount = 0;
2001 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
2004 unsigned SGPRUsed = findImplicitSGPRRead(MI);
2005 if (SGPRUsed != AMDGPU::NoRegister)
2008 for (int OpIdx : OpIndices) {
2011 const MachineOperand &MO = MI.getOperand(OpIdx);
2012 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
2014 if (MO.getReg() != SGPRUsed)
2016 SGPRUsed = MO.getReg();
2022 if (ConstantBusCount > 1) {
2023 ErrInfo = "VOP* instruction uses the constant bus more than once";
2028 // Verify misc. restrictions on specific instructions.
2029 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
2030 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
2031 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2032 const MachineOperand &Src1 = MI.getOperand(Src1Idx);
2033 const MachineOperand &Src2 = MI.getOperand(Src2Idx);
2034 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
2035 if (!compareMachineOp(Src0, Src1) &&
2036 !compareMachineOp(Src0, Src2)) {
2037 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
2044 int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm();
2045 if (sopkIsZext(MI)) {
2046 if (!isUInt<16>(Imm)) {
2047 ErrInfo = "invalid immediate for SOPK instruction";
2051 if (!isInt<16>(Imm)) {
2052 ErrInfo = "invalid immediate for SOPK instruction";
2058 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
2059 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
2060 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2061 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
2062 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
2063 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;
2065 const unsigned StaticNumOps = Desc.getNumOperands() +
2066 Desc.getNumImplicitUses();
2067 const unsigned NumImplicitOps = IsDst ? 2 : 1;
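// The verifier expects V_MOVRELD to carry two extra implicit operands (a def
// of the indexed super-register plus a tied use of it), while V_MOVRELS only
// needs the single implicit use of the vector it reads through.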
2069 // Allow additional implicit operands. This allows a fixup done by the post
2070 // RA scheduler where the main implicit operand is killed and implicit-defs
2071 // are added for sub-registers that remain live after this instruction.
2072 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
2073 ErrInfo = "missing implicit register operands";
2077 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2079 if (!Dst->isUse()) {
2080 ErrInfo = "v_movreld_b32 vdst should be a use operand";
2085 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
2086 UseOpIdx != StaticNumOps + 1) {
2087 ErrInfo = "movrel implicit operands should be tied";
2092 const MachineOperand &Src0 = MI.getOperand(Src0Idx);
2093 const MachineOperand &ImpUse
2094 = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
2095 if (!ImpUse.isReg() || !ImpUse.isUse() ||
2096 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
2097 ErrInfo = "src0 should be subreg of implicit vector use";
2102 // Make sure we aren't losing exec uses in the td files. This mostly requires
2103 // being careful when using let Uses to try to add other use registers.
2104 if (shouldReadExec(MI)) {
2105 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
2106 ErrInfo = "VALU instruction does not implicitly read exec mask";
2112 if (MI.mayStore()) {
2113 // The register offset form of scalar stores may only use m0 as the
2114 // soffset register.
2115 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
2116 if (Soff && Soff->getReg() != AMDGPU::M0) {
2117 ErrInfo = "scalar stores must use m0 as offset register";
2126 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
2127 switch (MI.getOpcode()) {
2128 default: return AMDGPU::INSTRUCTION_LIST_END;
2129 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
2130 case AMDGPU::COPY: return AMDGPU::COPY;
2131 case AMDGPU::PHI: return AMDGPU::PHI;
2132 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
2133 case AMDGPU::S_MOV_B32:
2134 return MI.getOperand(1).isReg() ?
2135 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
2136 case AMDGPU::S_ADD_I32:
2137 case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
2138 case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
2139 case AMDGPU::S_SUB_I32:
2140 case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
2141 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
2142 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
2143 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
2144 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
2145 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
2146 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
2147 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
2148 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
2149 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
2150 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
2151 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
2152 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
2153 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
2154 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
2155 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
2156 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
2157 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
2158 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
2159 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
2160 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
2161 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
2162 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
2163 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
2164 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
2165 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
2166 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
2167 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
2168 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
2169 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
2170 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
2171 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
2172 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
2173 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
2174 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
2175 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
2176 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
2177 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
2178 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
2179 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
2180 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
2181 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
2182 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
2183 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
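// The S_CMP_* cases above map to V_CMP_*_e32, which write their result to VCC
// rather than SCC, so the SCC-based branches are likewise rewritten to their
// VCC-based forms.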
2187 bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
2188 return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
2191 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
2192 unsigned OpNo) const {
2193 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2194 const MCInstrDesc &Desc = get(MI.getOpcode());
2195 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
2196 Desc.OpInfo[OpNo].RegClass == -1) {
2197 unsigned Reg = MI.getOperand(OpNo).getReg();
2199 if (TargetRegisterInfo::isVirtualRegister(Reg))
2200 return MRI.getRegClass(Reg);
2201 return RI.getPhysRegClass(Reg);
2204 unsigned RCID = Desc.OpInfo[OpNo].RegClass;
2205 return RI.getRegClass(RCID);
2208 bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
2209 switch (MI.getOpcode()) {
2211 case AMDGPU::REG_SEQUENCE:
2213 case AMDGPU::INSERT_SUBREG:
2214 return RI.hasVGPRs(getOpRegClass(MI, 0));
2216 return RI.hasVGPRs(getOpRegClass(MI, OpNo));
2220 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
2221 MachineBasicBlock::iterator I = MI;
2222 MachineBasicBlock *MBB = MI.getParent();
2223 MachineOperand &MO = MI.getOperand(OpIdx);
2224 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2225 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
2226 const TargetRegisterClass *RC = RI.getRegClass(RCID);
2227 unsigned Opcode = AMDGPU::V_MOV_B32_e32;
2229 Opcode = AMDGPU::COPY;
2230 else if (RI.isSGPRClass(RC))
2231 Opcode = AMDGPU::S_MOV_B32;
2233 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
2234 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
2235 VRC = &AMDGPU::VReg_64RegClass;
2237 VRC = &AMDGPU::VGPR_32RegClass;
2239 unsigned Reg = MRI.createVirtualRegister(VRC);
2240 DebugLoc DL = MBB->findDebugLoc(I);
2241 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).addOperand(MO);
2242 MO.ChangeToRegister(Reg, false);
2245 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
2246 MachineRegisterInfo &MRI,
2247 MachineOperand &SuperReg,
2248 const TargetRegisterClass *SuperRC,
2250 const TargetRegisterClass *SubRC)
2252 MachineBasicBlock *MBB = MI->getParent();
2253 DebugLoc DL = MI->getDebugLoc();
2254 unsigned SubReg = MRI.createVirtualRegister(SubRC);
2256 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
2257 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2258 .addReg(SuperReg.getReg(), 0, SubIdx);
2262 // Just in case the super register is itself a sub-register, copy it to a new
2263 // value so we don't need to worry about merging its subreg index with the
2264 // SubIdx passed to this function. The register coalescer should be able to
2265 // eliminate this extra copy.
2266 unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);
2268 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
2269 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());
2271 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
2272 .addReg(NewSuperReg, 0, SubIdx);
2277 MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
2278 MachineBasicBlock::iterator MII,
2279 MachineRegisterInfo &MRI,
2281 const TargetRegisterClass *SuperRC,
2283 const TargetRegisterClass *SubRC) const {
2285 if (SubIdx == AMDGPU::sub0)
2286 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
2287 if (SubIdx == AMDGPU::sub1)
2288 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));
2290 llvm_unreachable("Unhandled register index for immediate");
2293 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
2295 return MachineOperand::CreateReg(SubReg, false);
2298 // Change the order of operands from (0, 1, 2) to (0, 2, 1)
2299 void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
2300 assert(Inst.getNumExplicitOperands() == 3);
2301 MachineOperand Op1 = Inst.getOperand(1);
2302 Inst.RemoveOperand(1);
2303 Inst.addOperand(Op1);
2306 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
2307 const MCOperandInfo &OpInfo,
2308 const MachineOperand &MO) const {
2312 unsigned Reg = MO.getReg();
2313 const TargetRegisterClass *RC =
2314 TargetRegisterInfo::isVirtualRegister(Reg) ?
2315 MRI.getRegClass(Reg) :
2316 RI.getPhysRegClass(Reg);
2318 const SIRegisterInfo *TRI =
2319 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
2320 RC = TRI->getSubRegClass(RC, MO.getSubReg());
2322 // In order to be legal, the common sub-class must be equal to the
2323 // class of the current operand. For example:
2325 // v_mov_b32 s0 ; Operand defined as vsrc_b32
2326 // ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL
2328 // s_sendmsg 0, s0 ; Operand defined as m0reg
2329 // ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL
2331 return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
2334 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
2335 const MCOperandInfo &OpInfo,
2336 const MachineOperand &MO) const {
2338 return isLegalRegOperand(MRI, OpInfo, MO);
2340 // Handle non-register types that are treated like immediates.
2341 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
2345 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
2346 const MachineOperand *MO) const {
2347 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
2348 const MCInstrDesc &InstDesc = MI.getDesc();
2349 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
2350 const TargetRegisterClass *DefinedRC =
2351 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
2353 MO = &MI.getOperand(OpIdx);
2355 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
2357 RegSubRegPair SGPRUsed;
2359 SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg());
2361 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
2364 const MachineOperand &Op = MI.getOperand(i);
2366 if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) &&
2367 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
2370 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
2378 return isLegalRegOperand(MRI, OpInfo, *MO);
2381 // Handle non-register types that are treated like immediates.
2382 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
2385 // This operand expects an immediate.
2389 return isImmOperandLegal(MI, OpIdx, *MO);
2392 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
2393 MachineInstr &MI) const {
2394 unsigned Opc = MI.getOpcode();
2395 const MCInstrDesc &InstrDesc = get(Opc);
2397 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
2398 MachineOperand &Src1 = MI.getOperand(Src1Idx);
2400 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
2401 // we need to only have one constant bus use.
2403 // Note we do not need to worry about literal constants here. They are
2404 // disabled for the operand type for instructions because they will always
2405 // violate the one constant bus use rule.
2406 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
2407 if (HasImplicitSGPR) {
2408 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2409 MachineOperand &Src0 = MI.getOperand(Src0Idx);
2411 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
2412 legalizeOpWithMove(MI, Src0Idx);
2415 // VOP2 src0 instructions support all operand types, so we don't need to check
2416 // their legality. If src1 is already legal, we don't need to do anything.
2417 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
2420 // We do not use commuteInstruction here because it is too aggressive and will
2421 // commute if it is possible. We only want to commute here if it improves
2422 // legality. This can be called a fairly large number of times so don't waste
2423 // compile time pointlessly swapping and checking legality again.
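// For example, a hypothetical "V_ADD_F32_e32 %vdst, %v0, %s0" has an SGPR in
// src1, which the VOP2 encoding does not allow; swapping the sources to
// "V_ADD_F32_e32 %vdst, %s0, %v0" makes it legal without inserting a move.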
2424 if (HasImplicitSGPR || !MI.isCommutable()) {
2425 legalizeOpWithMove(MI, Src1Idx);
2429 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
2430 MachineOperand &Src0 = MI.getOperand(Src0Idx);
2432 // If src0 can be used as src1, commuting will make the operands legal.
2433 // Otherwise we have to give up and insert a move.
2435 // TODO: Other immediate-like operand kinds could be commuted if there was a
2436 // MachineOperand::ChangeTo* for them.
2437 if ((!Src1.isImm() && !Src1.isReg()) ||
2438 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
2439 legalizeOpWithMove(MI, Src1Idx);
2443 int CommutedOpc = commuteOpcode(MI);
2444 if (CommutedOpc == -1) {
2445 legalizeOpWithMove(MI, Src1Idx);
2449 MI.setDesc(get(CommutedOpc));
2451 unsigned Src0Reg = Src0.getReg();
2452 unsigned Src0SubReg = Src0.getSubReg();
2453 bool Src0Kill = Src0.isKill();
2456 Src0.ChangeToImmediate(Src1.getImm());
2457 else if (Src1.isReg()) {
2458 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
2459 Src0.setSubReg(Src1.getSubReg());
2461 llvm_unreachable("Should only have register or immediate operands");
2463 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
2464 Src1.setSubReg(Src0SubReg);
2467 // Legalize VOP3 operands. Because all operand types are supported for any
2468 // operand, and since literal constants are not allowed and should never be
2469 // seen, we only need to worry about inserting copies if we use multiple SGPR
2470 // operands.
2471 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
2472 MachineInstr &MI) const {
2473 unsigned Opc = MI.getOpcode();
2476 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
2477 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
2478 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
2481 // Find the one SGPR operand we are allowed to use.
2482 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
2484 for (unsigned i = 0; i < 3; ++i) {
2485 int Idx = VOP3Idx[i];
2488 MachineOperand &MO = MI.getOperand(Idx);
2490 // We should never see a VOP3 instruction with an illegal immediate operand.
2494 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
2495 continue; // VGPRs are legal
2497 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
2498 SGPRReg = MO.getReg();
2499 // We can use one SGPR in each VOP3 instruction.
2503 // If we make it this far, then the operand is not legal and we must
2504 // legalize it.
2505 legalizeOpWithMove(MI, Idx);
2509 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
2510 MachineRegisterInfo &MRI) const {
2511 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
2512 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
2513 unsigned DstReg = MRI.createVirtualRegister(SRC);
2514 unsigned SubRegs = VRC->getSize() / 4;
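// Read the value back one 32-bit lane at a time: V_READFIRSTLANE_B32 copies a
// single 32-bit VGPR into an SGPR, so wider values are rebuilt from their
// dword sub-registers with the REG_SEQUENCE below.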
2516 SmallVector<unsigned, 8> SRegs;
2517 for (unsigned i = 0; i < SubRegs; ++i) {
2518 unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2519 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2520 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
2521 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
2522 SRegs.push_back(SGPR);
2525 MachineInstrBuilder MIB =
2526 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
2527 get(AMDGPU::REG_SEQUENCE), DstReg);
2528 for (unsigned i = 0; i < SubRegs; ++i) {
2529 MIB.addReg(SRegs[i]);
2530 MIB.addImm(RI.getSubRegFromChannel(i));
2535 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
2536 MachineInstr &MI) const {
2538 // If the pointer is stored in VGPRs, then we need to move it to
2539 // SGPRs using v_readfirstlane. This is safe because we only select
2540 // loads with uniform pointers to SMRD instructions, so we know the
2541 // pointer value is uniform.
2542 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
2543 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
2544 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
2545 SBase->setReg(SGPR);
2549 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
2550 MachineBasicBlock::iterator I,
2551 const TargetRegisterClass *DstRC,
2553 MachineRegisterInfo &MRI,
2554 const DebugLoc &DL) const {
2556 unsigned OpReg = Op.getReg();
2557 unsigned OpSubReg = Op.getSubReg();
2559 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
2560 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
2562 // Check if operand is already the correct register class.
2566 unsigned DstReg = MRI.createVirtualRegister(DstRC);
2567 MachineInstr *Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg)
2573 MachineInstr *Def = MRI.getVRegDef(OpReg);
2577 // Try to eliminate the copy if it is copying an immediate value.
2578 if (Def->isMoveImmediate())
2579 FoldImmediate(*Copy, *Def, OpReg, &MRI);
2582 void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
2583 MachineFunction &MF = *MI.getParent()->getParent();
2584 MachineRegisterInfo &MRI = MF.getRegInfo();
2587 if (isVOP2(MI) || isVOPC(MI)) {
2588 legalizeOperandsVOP2(MRI, MI);
2594 legalizeOperandsVOP3(MRI, MI);
2600 legalizeOperandsSMRD(MRI, MI);
2604 // Legalize REG_SEQUENCE and PHI
2605 // The register class of the operands must be the same type as the register
2606 // class of the output.
2607 if (MI.getOpcode() == AMDGPU::PHI) {
2608 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
2609 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2610 if (!MI.getOperand(i).isReg() ||
2611 !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
2613 const TargetRegisterClass *OpRC =
2614 MRI.getRegClass(MI.getOperand(i).getReg());
2615 if (RI.hasVGPRs(OpRC)) {
2622 // If any of the operands are VGPR registers, then they all must be VGPRs;
2623 // otherwise we will create illegal VGPR->SGPR copies when legalizing
2624 // them.
2625 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
2628 VRC = RI.getEquivalentVGPRClass(SRC);
2635 // Update all the operands so they have the same type.
2636 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2637 MachineOperand &Op = MI.getOperand(I);
2638 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2641 // MI is a PHI instruction.
2642 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
2643 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
2645 // Avoid creating no-op copies with the same src and dst reg class. These
2646 // confuse some of the machine passes.
2647 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
2651 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
2652 // VGPR dest type and SGPR sources, insert copies so all operands are
2653 // VGPRs. This seems to help operand folding / the register coalescer.
2654 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
2655 MachineBasicBlock *MBB = MI.getParent();
2656 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
2657 if (RI.hasVGPRs(DstRC)) {
2658 // Update all the operands so they are VGPR register classes. These may
2659 // not be the same register class because REG_SEQUENCE supports mixing
2660 // subregister index types e.g. sub0_sub1 + sub2 + sub3
2661 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2662 MachineOperand &Op = MI.getOperand(I);
2663 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2666 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
2667 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
2671 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
2679 // Legalize INSERT_SUBREG
2680 // src0 must have the same register class as dst
2681 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
2682 unsigned Dst = MI.getOperand(0).getReg();
2683 unsigned Src0 = MI.getOperand(1).getReg();
2684 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
2685 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
2686 if (DstRC != Src0RC) {
2687 MachineBasicBlock *MBB = MI.getParent();
2688 MachineOperand &Op = MI.getOperand(1);
2689 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
2694 // Legalize MIMG and MUBUF/MTBUF for shaders.
2696 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
2697 // scratch memory access. In both cases, the legalization never involves
2698 // conversion to the addr64 form.
2700 (AMDGPU::isShader(MF.getFunction()->getCallingConv()) &&
2701 (isMUBUF(MI) || isMTBUF(MI)))) {
2702 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
2703 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
2704 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
2705 SRsrc->setReg(SGPR);
2708 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
2709 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
2710 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
2711 SSamp->setReg(SGPR);
2716 // Legalize MUBUF* instructions by converting to addr64 form.
2717 // FIXME: If we start using the non-addr64 instructions for compute, we
2718 // may need to legalize them as above. This especially applies to the
2719 // buffer_load_format_* variants and variants with idxen (or bothen).
2721 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
2722 if (SRsrcIdx != -1) {
2723 // We have an MUBUF instruction
2724 MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx);
2725 unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass;
2726 if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
2727 RI.getRegClass(SRsrcRC))) {
2728 // The operands are legal.
2729 // FIXME: We may need to legalize operands besides srsrc.
2733 MachineBasicBlock &MBB = *MI.getParent();
2735 // Extract the ptr from the resource descriptor.
2736 unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
2737 &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
2739 // Create an empty resource descriptor
2740 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2741 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2742 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2743 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
2744 uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
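// The replacement descriptor uses a zero base address in dwords 0-1 (the real
// pointer extracted above is added into vaddr instead), with the default
// data-format bits in dwords 2-3.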
2747 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
2750 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
2751 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
2752 .addImm(RsrcDataFormat & 0xFFFFFFFF);
2754 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
2755 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
2756 .addImm(RsrcDataFormat >> 32);
2758 // NewSRsrc = {Zero64, SRsrcFormat}
2759 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
2761 .addImm(AMDGPU::sub0_sub1)
2762 .addReg(SRsrcFormatLo)
2763 .addImm(AMDGPU::sub2)
2764 .addReg(SRsrcFormatHi)
2765 .addImm(AMDGPU::sub3);
2767 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
2768 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
2770 // This is already an ADDR64 instruction so we need to add the pointer
2771 // extracted from the resource descriptor to the current value of VAddr.
2772 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2773 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2775 // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
2776 DebugLoc DL = MI.getDebugLoc();
2777 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
2778 .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2779 .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
2781 // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
2782 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
2783 .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2784 .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
2786 // NewVaddr = {NewVaddrHi, NewVaddrLo}
2787 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
2789 .addImm(AMDGPU::sub0)
2791 .addImm(AMDGPU::sub1);
2793 // This instruction is the _OFFSET variant, so we need to convert it to
2794 // ADDR64.
2795 assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration()
2796 < SISubtarget::VOLCANIC_ISLANDS &&
2797 "FIXME: Need to emit flat atomics here");
2799 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
2800 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
2801 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
2802 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
2804 // Atomics with return have an additional tied operand and are
2805 // missing some of the special bits.
2806 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
2807 MachineInstr *Addr64;
2810 // Regular buffer load / store.
2811 MachineInstrBuilder MIB =
2812 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2814 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2815 // This will be replaced later
2816 // with the new value of vaddr.
2818 .addOperand(*SOffset)
2819 .addOperand(*Offset);
2821 // Atomics do not have this operand.
2822 if (const MachineOperand *GLC =
2823 getNamedOperand(MI, AMDGPU::OpName::glc)) {
2824 MIB.addImm(GLC->getImm());
2827 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
2829 if (const MachineOperand *TFE =
2830 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
2831 MIB.addImm(TFE->getImm());
2834 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
2837 // Atomics with return.
2838 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
2840 .addOperand(*VDataIn)
2841 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2842 // This will be replaced later
2843 // with the new value of vaddr.
2845 .addOperand(*SOffset)
2846 .addOperand(*Offset)
2847 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
2848 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
2851 MI.removeFromParent();
2853 // NewVaddr = {NewVaddrHi, NewVaddrLo}
2854 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
2856 .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2857 .addImm(AMDGPU::sub0)
2858 .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2859 .addImm(AMDGPU::sub1);
2861 VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr);
2862 SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc);
2865 // Update the instruction to use NewVaddr
2866 VAddr->setReg(NewVAddr);
2867 // Update the instruction to use NewSRsrc
2868 SRsrc->setReg(NewSRsrc);
2872 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
2873 SmallVector<MachineInstr *, 128> Worklist;
2874 Worklist.push_back(&TopInst);
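// Simple worklist algorithm: rewrite each instruction to its VALU form (or
// split/lower it for the 64-bit cases below), then push any users of the new
// VGPR result that still cannot read a VGPR so they get converted too.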
2876 while (!Worklist.empty()) {
2877 MachineInstr &Inst = *Worklist.pop_back_val();
2878 MachineBasicBlock *MBB = Inst.getParent();
2879 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2881 unsigned Opcode = Inst.getOpcode();
2882 unsigned NewOpcode = getVALUOp(Inst);
2884 // Handle some special cases
2888 case AMDGPU::S_AND_B64:
2889 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
2890 Inst.eraseFromParent();
2893 case AMDGPU::S_OR_B64:
2894 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
2895 Inst.eraseFromParent();
2898 case AMDGPU::S_XOR_B64:
2899 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
2900 Inst.eraseFromParent();
2903 case AMDGPU::S_NOT_B64:
2904 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
2905 Inst.eraseFromParent();
2908 case AMDGPU::S_BCNT1_I32_B64:
2909 splitScalar64BitBCNT(Worklist, Inst);
2910 Inst.eraseFromParent();
2913 case AMDGPU::S_BFE_I64: {
2914 splitScalar64BitBFE(Worklist, Inst);
2915 Inst.eraseFromParent();
2919 case AMDGPU::S_LSHL_B32:
2920 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2921 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
2925 case AMDGPU::S_ASHR_I32:
2926 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2927 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
2931 case AMDGPU::S_LSHR_B32:
2932 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2933 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
2937 case AMDGPU::S_LSHL_B64:
2938 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2939 NewOpcode = AMDGPU::V_LSHLREV_B64;
2943 case AMDGPU::S_ASHR_I64:
2944 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2945 NewOpcode = AMDGPU::V_ASHRREV_I64;
2949 case AMDGPU::S_LSHR_B64:
2950 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
2951 NewOpcode = AMDGPU::V_LSHRREV_B64;
2956 case AMDGPU::S_ABS_I32:
2957 lowerScalarAbs(Worklist, Inst);
2958 Inst.eraseFromParent();
2961 case AMDGPU::S_CBRANCH_SCC0:
2962 case AMDGPU::S_CBRANCH_SCC1:
2963 // Clear unused bits of vcc
2964 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
2966 .addReg(AMDGPU::EXEC)
2967 .addReg(AMDGPU::VCC);
2970 case AMDGPU::S_BFE_U64:
2971 case AMDGPU::S_BFM_B64:
2972 llvm_unreachable("Moving this op to VALU not implemented");
2975 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
2976 // We cannot move this instruction to the VALU, so we should try to
2977 // legalize its operands instead.
2978 legalizeOperands(Inst);
2982 // Use the new VALU Opcode.
2983 const MCInstrDesc &NewDesc = get(NewOpcode);
2984 Inst.setDesc(NewDesc);
2986 // Remove any references to SCC. Vector instructions can't read from it, and
2987 // we're just about to add the implicit use / defs of VCC, and we don't want
2988 // them to be overwritten.
2989 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
2990 MachineOperand &Op = Inst.getOperand(i);
2991 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
2992 Inst.RemoveOperand(i);
2993 addSCCDefUsersToVALUWorklist(Inst, Worklist);
2997 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
2998 // We are converting these to a BFE, so we need to add the missing
2999 // operands for the size and offset.
3000 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
3001 Inst.addOperand(MachineOperand::CreateImm(0));
3002 Inst.addOperand(MachineOperand::CreateImm(Size));
3004 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
3005 // The VALU version adds the second operand to the result, so insert an
3006 // extra zero operand.
3007 Inst.addOperand(MachineOperand::CreateImm(0));
3010 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
3012 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
3013 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
3014 // If we need to move this to VGPRs, we need to unpack the second operand
3015 // back into the 2 separate ones for bit offset and width.
3016 assert(OffsetWidthOp.isImm() &&
3017 "Scalar BFE is only implemented for constant width and offset");
3018 uint32_t Imm = OffsetWidthOp.getImm();
3020 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3021 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3022 Inst.RemoveOperand(2); // Remove old immediate.
3023 Inst.addOperand(MachineOperand::CreateImm(Offset));
3024 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
3027 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
3028 unsigned NewDstReg = AMDGPU::NoRegister;
3030 // Update the destination register class.
3031 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
3035 unsigned DstReg = Inst.getOperand(0).getReg();
3036 if (Inst.isCopy() &&
3037 TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) &&
3038 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
3039 // Instead of creating a copy where src and dst are the same register
3040 // class, we just replace all uses of dst with src. These kinds of
3041 // copies interfere with the heuristics MachineSink uses to decide
3042 // whether or not to split a critical edge. Since the pass assumes
3043 // that copies will end up as machine instructions and not be
3045 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
3046 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
3047 MRI.clearKillFlags(Inst.getOperand(1).getReg());
3048 Inst.getOperand(0).setReg(DstReg);
3052 NewDstReg = MRI.createVirtualRegister(NewDstRC);
3053 MRI.replaceRegWith(DstReg, NewDstReg);
3056 // Legalize the operands
3057 legalizeOperands(Inst);
3060 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
3064 void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
3065 MachineInstr &Inst) const {
3066 MachineBasicBlock &MBB = *Inst.getParent();
3067 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3068 MachineBasicBlock::iterator MII = Inst;
3069 DebugLoc DL = Inst.getDebugLoc();
3071 MachineOperand &Dest = Inst.getOperand(0);
3072 MachineOperand &Src = Inst.getOperand(1);
3073 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3074 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
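// |x| is lowered as max(x, 0 - x): negate the source into TmpReg with a
// subtract from zero, then take the signed maximum.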
3076 BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
3078 .addReg(Src.getReg());
3080 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
3081 .addReg(Src.getReg())
3084 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3085 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3088 void SIInstrInfo::splitScalar64BitUnaryOp(
3089 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3090 unsigned Opcode) const {
3091 MachineBasicBlock &MBB = *Inst.getParent();
3092 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3094 MachineOperand &Dest = Inst.getOperand(0);
3095 MachineOperand &Src0 = Inst.getOperand(1);
3096 DebugLoc DL = Inst.getDebugLoc();
3098 MachineBasicBlock::iterator MII = Inst;
3100 const MCInstrDesc &InstDesc = get(Opcode);
3101 const TargetRegisterClass *Src0RC = Src0.isReg() ?
3102 MRI.getRegClass(Src0.getReg()) :
3103 &AMDGPU::SGPR_32RegClass;
3105 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3107 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3108 AMDGPU::sub0, Src0SubRC);
3110 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3111 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3112 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3114 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3115 BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3116 .addOperand(SrcReg0Sub0);
3118 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3119 AMDGPU::sub1, Src0SubRC);
3121 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3122 BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3123 .addOperand(SrcReg0Sub1);
3125 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3126 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3128 .addImm(AMDGPU::sub0)
3130 .addImm(AMDGPU::sub1);
3132 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3134 // We don't need to legalizeOperands here because for a single operand, src0
3135 // will support any kind of input.
3137 // Move all users of this moved value.
3138 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3141 void SIInstrInfo::splitScalar64BitBinaryOp(
3142 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3143 unsigned Opcode) const {
3144 MachineBasicBlock &MBB = *Inst.getParent();
3145 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3147 MachineOperand &Dest = Inst.getOperand(0);
3148 MachineOperand &Src0 = Inst.getOperand(1);
3149 MachineOperand &Src1 = Inst.getOperand(2);
3150 DebugLoc DL = Inst.getDebugLoc();
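// The 64-bit scalar op is split into two 32-bit VALU ops, one per half; e.g.
// (roughly) s_and_b64 becomes a V_AND_B32 of the sub0 halves and a V_AND_B32
// of the sub1 halves, recombined with a REG_SEQUENCE.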
3152 MachineBasicBlock::iterator MII = Inst;
3154 const MCInstrDesc &InstDesc = get(Opcode);
3155 const TargetRegisterClass *Src0RC = Src0.isReg() ?
3156 MRI.getRegClass(Src0.getReg()) :
3157 &AMDGPU::SGPR_32RegClass;
3159 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3160 const TargetRegisterClass *Src1RC = Src1.isReg() ?
3161 MRI.getRegClass(Src1.getReg()) :
3162 &AMDGPU::SGPR_32RegClass;
3164 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
3166 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3167 AMDGPU::sub0, Src0SubRC);
3168 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3169 AMDGPU::sub0, Src1SubRC);
3171 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3172 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3173 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3175 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3176 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3177 .addOperand(SrcReg0Sub0)
3178 .addOperand(SrcReg1Sub0);
3180 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3181 AMDGPU::sub1, Src0SubRC);
3182 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3183 AMDGPU::sub1, Src1SubRC);
3185 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3186 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3187 .addOperand(SrcReg0Sub1)
3188 .addOperand(SrcReg1Sub1);
3190 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3191 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3193 .addImm(AMDGPU::sub0)
3195 .addImm(AMDGPU::sub1);
3197 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3199 // Try to legalize the operands in case we need to swap the order to keep it
3200 // valid.
3201 legalizeOperands(LoHalf);
3202 legalizeOperands(HiHalf);
3204 // Move all users of this moved value.
3205 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3208 void SIInstrInfo::splitScalar64BitBCNT(
3209 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const {
3210 MachineBasicBlock &MBB = *Inst.getParent();
3211 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3213 MachineBasicBlock::iterator MII = Inst;
3214 DebugLoc DL = Inst.getDebugLoc();
3216 MachineOperand &Dest = Inst.getOperand(0);
3217 MachineOperand &Src = Inst.getOperand(1);
3219 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
3220 const TargetRegisterClass *SrcRC = Src.isReg() ?
3221 MRI.getRegClass(Src.getReg()) :
3222 &AMDGPU::SGPR_32RegClass;
3224 unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3225 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
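// A 64-bit population count is computed as bcnt(lo) followed by bcnt(hi);
// V_BCNT_U32_B32 adds its second operand to the count, so the second
// instruction accumulates on top of MidReg.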
3227 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
3229 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3230 AMDGPU::sub0, SrcSubRC);
3231 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3232 AMDGPU::sub1, SrcSubRC);
3234 BuildMI(MBB, MII, DL, InstDesc, MidReg)
3235 .addOperand(SrcRegSub0)
3238 BuildMI(MBB, MII, DL, InstDesc, ResultReg)
3239 .addOperand(SrcRegSub1)
3242 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3244 // We don't need to legalize operands here. src0 for either instruction can be
3245 // an SGPR, and the second input is unused or determined here.
3246 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3249 void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
3250 MachineInstr &Inst) const {
3251 MachineBasicBlock &MBB = *Inst.getParent();
3252 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3253 MachineBasicBlock::iterator MII = Inst;
3254 DebugLoc DL = Inst.getDebugLoc();
3256 MachineOperand &Dest = Inst.getOperand(0);
3257 uint32_t Imm = Inst.getOperand(2).getImm();
3258 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3259 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
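// For example, an S_BFE_I64 immediate of 0x100000 decodes to Offset = 0 and
// BitWidth = 16, i.e. a 64-bit sign extension of the low 16 bits.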
3263 // Only sext_inreg cases handled.
3264 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
3265 Offset == 0 && "Not implemented");
3267 if (BitWidth < 32) {
3268 unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3269 unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3270 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3272 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
3273 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
3277 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
3281 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3283 .addImm(AMDGPU::sub0)
3285 .addImm(AMDGPU::sub1);
3287 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3288 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3292 MachineOperand &Src = Inst.getOperand(1);
3293 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3294 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3296 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
3298 .addReg(Src.getReg(), 0, AMDGPU::sub0);
3300 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3301 .addReg(Src.getReg(), 0, AMDGPU::sub0)
3302 .addImm(AMDGPU::sub0)
3304 .addImm(AMDGPU::sub1);
3306 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3307 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3310 void SIInstrInfo::addUsersToMoveToVALUWorklist(
3312 MachineRegisterInfo &MRI,
3313 SmallVectorImpl<MachineInstr *> &Worklist) const {
3314 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
3315 E = MRI.use_end(); I != E;) {
3316 MachineInstr &UseMI = *I->getParent();
3317 if (!canReadVGPR(UseMI, I.getOperandNo())) {
3318 Worklist.push_back(&UseMI);
3322 } while (I != E && I->getParent() == &UseMI);
3329 void SIInstrInfo::addSCCDefUsersToVALUWorklist(
3330 MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const {
3331 // This assumes that all the users of SCC are in the same block
3332 // as the defs.
3333 for (MachineInstr &MI :
3334 llvm::make_range(MachineBasicBlock::iterator(SCCDefInst),
3335 SCCDefInst.getParent()->end())) {
3336 // Exit if we find another SCC def.
3337 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1)
3340 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1)
3341 Worklist.push_back(&MI);
3345 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
3346 const MachineInstr &Inst) const {
3347 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
3349 switch (Inst.getOpcode()) {
3350 // For target instructions, getOpRegClass just returns the virtual register
3351 // class associated with the operand, so we need to find an equivalent VGPR
3352 // register class in order to move the instruction to the VALU.
3355 case AMDGPU::REG_SEQUENCE:
3356 case AMDGPU::INSERT_SUBREG:
3357 if (RI.hasVGPRs(NewDstRC))
3360 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
3369 // Find the one SGPR operand we are allowed to use.
3370 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
3371 int OpIndices[3]) const {
3372 const MCInstrDesc &Desc = MI.getDesc();
3374 // Find the one SGPR operand we are allowed to use.
3376 // First we need to consider the instruction's operand requirements before
3377 // legalizing. Some operands are required to be SGPRs, such as implicit uses
3378 // of VCC, but we are still bound by the constant bus requirement to only use
3379 // one.
3381 // If the operand's class is an SGPR, we can never move it.
3383 unsigned SGPRReg = findImplicitSGPRRead(MI);
3384 if (SGPRReg != AMDGPU::NoRegister)
3387 unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
3388 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3390 for (unsigned i = 0; i < 3; ++i) {
3391 int Idx = OpIndices[i];
3395 const MachineOperand &MO = MI.getOperand(Idx);
3399 // Is this operand statically required to be an SGPR based on the operand
3400 // constraints?
3401 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
3402 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
3406 // If this could be a VGPR or an SGPR, check the dynamic register class.
3407 unsigned Reg = MO.getReg();
3408 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
3409 if (RI.isSGPRClass(RegRC))
3413 // We don't have a required SGPR operand, so we have a bit more freedom in
3414 // selecting operands to move.
3416 // Try to select the most used SGPR. If an SGPR is equal to one of the
3417 // others, we choose that.
3420 // V_FMA_F32 v0, s0, s0, s0 -> No moves
3421 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
3423 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
3426 if (UsedSGPRs[0] != AMDGPU::NoRegister) {
3427 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
3428 SGPRReg = UsedSGPRs[0];
3431 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
3432 if (UsedSGPRs[1] == UsedSGPRs[2])
3433 SGPRReg = UsedSGPRs[1];
3439 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
3440 unsigned OperandName) const {
3441 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
3445 return &MI.getOperand(Idx);
3448 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
3449 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
3450 if (ST.isAmdHsaOS()) {
3451 RsrcDataFormat |= (1ULL << 56);
3453 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3455 RsrcDataFormat |= (2ULL << 59);
3458 return RsrcDataFormat;
3461 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
3462 uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
3463 AMDGPU::RSRC_TID_ENABLE |
3464 0xffffffff; // Size;
3466 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
3468 Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) |
3470 (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT);
3472 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
3473 // Clear them unless we want a huge stride.
3474 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
3475 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
3480 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
3481 unsigned Opc = MI.getOpcode();
3486 bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
3487 unsigned Opc = MI.getOpcode();
3489 return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
3492 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
3493 int &FrameIndex) const {
3494 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
3495 if (!Addr || !Addr->isFI())
3496 return AMDGPU::NoRegister;
3498 assert(!MI.memoperands_empty() &&
3499 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
3501 FrameIndex = Addr->getIndex();
3502 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
3505 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
3506 int &FrameIndex) const {
3507 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
3508 assert(Addr && Addr->isFI());
3509 FrameIndex = Addr->getIndex();
3510 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
3513 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
3514 int &FrameIndex) const {
3517 return AMDGPU::NoRegister;
3519 if (isMUBUF(MI) || isVGPRSpill(MI))
3520 return isStackAccess(MI, FrameIndex);
3522 if (isSGPRSpill(MI))
3523 return isSGPRStackAccess(MI, FrameIndex);
3525 return AMDGPU::NoRegister;
3528 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
3529 int &FrameIndex) const {
3531 return AMDGPU::NoRegister;
3533 if (isMUBUF(MI) || isVGPRSpill(MI))
3534 return isStackAccess(MI, FrameIndex);
3536 if (isSGPRSpill(MI))
3537 return isSGPRStackAccess(MI, FrameIndex);
3539 return AMDGPU::NoRegister;
3542 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
3543 unsigned Opc = MI.getOpcode();
3544 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
3545 unsigned DescSize = Desc.getSize();
3547 // If we have a definitive size, we can use it. Otherwise we need to inspect
3548 // the operands to know the size.
3550 // FIXME: Instructions that have a base 32-bit encoding report their size as
3551 // 4, even though they are really 8 bytes if they have a literal operand.
3552 if (DescSize != 0 && DescSize != 4)
3555 if (Opc == AMDGPU::WAVE_BARRIER)
3558 // 4-byte instructions may have a 32-bit literal encoded after them. Check
3559 // operands that could ever be literals.
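// For example, an S_MOV_B32 of a value that is not an inline constant is a
// 4-byte encoding followed by a 32-bit literal, for 8 bytes total.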
3560 if (isVALU(MI) || isSALU(MI)) {
3561 if (isFixedSize(MI)) {
3562 assert(DescSize == 4);
3566 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
3568 return 4; // No operands.
3570 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
3573 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
3577 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
3587 case AMDGPU::SI_MASK_BRANCH:
3588 case TargetOpcode::IMPLICIT_DEF:
3589 case TargetOpcode::KILL:
3590 case TargetOpcode::DBG_VALUE:
3591 case TargetOpcode::BUNDLE:
3592 case TargetOpcode::EH_LABEL:
3594 case TargetOpcode::INLINEASM: {
3595 const MachineFunction *MF = MI.getParent()->getParent();
3596 const char *AsmStr = MI.getOperand(0).getSymbolName();
3597 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
3600 llvm_unreachable("unable to find instruction size");
3604 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
3608 if (MI.memoperands_empty())
3611 for (const MachineMemOperand *MMO : MI.memoperands()) {
3612 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
3618 ArrayRef<std::pair<int, const char *>>
3619 SIInstrInfo::getSerializableTargetIndices() const {
3620 static const std::pair<int, const char *> TargetIndices[] = {
3621 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
3622 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
3623 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
3624 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
3625 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
3626 return makeArrayRef(TargetIndices);
3629 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The
3630 /// post-RA version of misched uses CreateTargetMIHazardRecognizer.
3631 ScheduleHazardRecognizer *
3632 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
3633 const ScheduleDAG *DAG) const {
3634 return new GCNHazardRecognizer(DAG->MF);
3637 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
3638 /// pass.
3639 ScheduleHazardRecognizer *
3640 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
3641 return new GCNHazardRecognizer(MF);