//===-- SIFoldOperands.cpp - Fold operands --- ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//

#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"

using namespace llvm;

namespace {

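// Tracks a single pending fold: the use instruction and operand index to
// rewrite, the kind of value being folded (immediate, frame index, global
// address, or register), and whether the fold requires commuting or shrinking
// the use instruction first.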
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  int ShrinkOpcode;
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false,
                int ShrinkOp = -1) :
    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),
    Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg() || FoldOp->isGlobal());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }

  bool isGlobal() const { return Kind == MachineOperand::MO_GlobalAddress; }

  bool isCommuted() const {
    return Commuted;
  }

  bool needsShrink() const {
    return ShrinkOpcode != -1;
  }

  int getShrinkOpcode() const {
    return ShrinkOpcode;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const GCNSubtarget *ST;
  const SIMachineFunctionInfo *MFI;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   int UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F32_e64:
  case AMDGPU::V_FMAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;

      unsigned Opc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      const MCInstrDesc &MadDesc = TII->get(Opc);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

// TODO: Add heuristic that the frame index might not fit in the addressing mode
// immediate offset to avoid materializing in loops.
static bool frameIndexMayFold(const SIInstrInfo *TII,
                              const MachineInstr &UseMI,
                              int OpNo,
                              const MachineOperand &OpToFold) {
  return OpToFold.isFI() &&
    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&
    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

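// Rewrite the operand described by Fold in place. Handles the op_sel /
// op_sel_hi adjustment for packed-f16 inlinable literals, shrinks carry-out
// VOP instructions to their 32-bit encodings when the fold requires it, and
// otherwise mutates the old operand into the folded immediate, global
// address, frame index, or register.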
static bool updateOperand(FoldCandidate &Fold,
                          const SIInstrInfo &TII,
                          const TargetRegisterInfo &TRI,
                          const GCNSubtarget &ST) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    if (MI->getDesc().TSFlags & SIInstrFlags::IsPacked &&
        !(MI->getDesc().TSFlags & SIInstrFlags::IsMAI) &&
        AMDGPU::isInlinableLiteralV216(static_cast<uint16_t>(Fold.ImmToFold),
                                       ST.hasInv2PiInlineImm())) {
      // Set op_sel/op_sel_hi on this operand or bail out if op_sel is
      // already set.
      unsigned Opcode = MI->getOpcode();
      int OpNo = MI->getOperandNo(&Old);
      int ModIdx = -1;
      if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0))
        ModIdx = AMDGPU::OpName::src0_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1))
        ModIdx = AMDGPU::OpName::src1_modifiers;
      else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2))
        ModIdx = AMDGPU::OpName::src2_modifiers;
      assert(ModIdx != -1);
      ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx);
      MachineOperand &Mod = MI->getOperand(ModIdx);
      unsigned Val = Mod.getImm();
      if ((Val & SISrcMods::OP_SEL_0) || !(Val & SISrcMods::OP_SEL_1))
        return false;

      // Only apply the following transformation if that operand requires
      // a packed immediate.
      switch (TII.get(Opcode).OpInfo[OpNo].OperandType) {
      case AMDGPU::OPERAND_REG_IMM_V2FP16:
      case AMDGPU::OPERAND_REG_IMM_V2INT16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
      case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
        // If upper part is all zero we do not need op_sel_hi.
        if (!isUInt<16>(Fold.ImmToFold)) {
          if (!(Fold.ImmToFold & 0xffff)) {
            Mod.setImm(Mod.getImm() | SISrcMods::OP_SEL_0);
            Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
            Old.ChangeToImmediate((Fold.ImmToFold >> 16) & 0xffff);
            return true;
          }
          Mod.setImm(Mod.getImm() & ~SISrcMods::OP_SEL_1);
          Old.ChangeToImmediate(Fold.ImmToFold & 0xffff);
          return true;
        }
        break;
      default:
        break;
      }
    }
  }

  if ((Fold.isImm() || Fold.isFI() || Fold.isGlobal()) && Fold.needsShrink()) {
    MachineBasicBlock *MBB = MI->getParent();
    auto Liveness = MBB->computeRegisterLiveness(&TRI, AMDGPU::VCC, MI, 16);
    if (Liveness != MachineBasicBlock::LQR_Dead) {
      LLVM_DEBUG(dbgs() << "Not shrinking " << MI << " due to vcc liveness\n");
      return false;
    }

    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    int Op32 = Fold.getShrinkOpcode();
    MachineOperand &Dst0 = MI->getOperand(0);
    MachineOperand &Dst1 = MI->getOperand(1);
    assert(Dst0.isDef() && Dst1.isDef());

    bool HaveNonDbgCarryUse = !MRI.use_nodbg_empty(Dst1.getReg());

    const TargetRegisterClass *Dst0RC = MRI.getRegClass(Dst0.getReg());
    Register NewReg0 = MRI.createVirtualRegister(Dst0RC);

    MachineInstr *Inst32 = TII.buildShrunkInst(*MI, Op32);

    if (HaveNonDbgCarryUse) {
      BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), Dst1.getReg())
        .addReg(AMDGPU::VCC, RegState::Kill);
    }

    // Keep the old instruction around to avoid breaking iterators, but
    // replace it with a dummy instruction to remove uses.
    //
    // FIXME: We should not invert how this pass looks at operands to avoid
    // this. Should track set of foldable movs instead of looking for uses
    // when looking at a use.
    Dst0.setReg(NewReg0);
    for (unsigned I = MI->getNumOperands() - 1; I > 0; --I)
      MI->RemoveOperand(I);
    MI->setDesc(TII.get(AMDGPU::IMPLICIT_DEF));

    if (Fold.isCommuted())
      TII.commuteInstruction(*Inst32, false);
    return true;
  }

  assert(!Fold.needsShrink() && "not handled");

  if (Fold.isImm()) {
    // FIXME: ChangeToImmediate should probably clear the subreg flags. It's
    // reinterpreted as TargetFlags.
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isGlobal()) {
    Old.ChangeToGA(Fold.OpToFold->getGlobal(), Fold.OpToFold->getOffset(),
                   Fold.OpToFold->getTargetFlags());
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
  Old.setIsUndef(New->isUndef());
  return true;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

static void appendFoldCandidate(SmallVectorImpl<FoldCandidate> &FoldList,
                                MachineInstr *MI, unsigned OpNo,
                                MachineOperand *FoldOp, bool Commuted = false,
                                int ShrinkOp = -1) {
  // Skip additional folding on the same operand.
  for (FoldCandidate &Fold : FoldList)
    if (Fold.UseMI == MI && Fold.UseOpNo == OpNo)
      return;
  LLVM_DEBUG(dbgs() << "Append " << (Commuted ? "commuted" : "normal")
                    << " operand " << OpNo << "\n  " << *MI << '\n');
  FoldList.push_back(FoldCandidate(MI, OpNo, FoldOp, Commuted, ShrinkOp));
}

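// Try to record folding OpToFold into operand OpNo of MI. If the operand is
// not legal as-is, this tries converting mac/fmac into mad/fma, converting
// s_setreg_b32 into its immediate form, and commuting the instruction before
// giving up. Also rejects folds that would give a SALU instruction a second
// literal operand.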
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
         Opc == AMDGPU::V_FMAC_F32_e64 || Opc == AMDGPU::V_FMAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F16_e64;
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64 ||
                   Opc == AMDGPU::V_FMAC_F32_e64;
      unsigned NewOpc = IsFMA ?
        (IsF32 ? AMDGPU::V_FMA_F32 : AMDGPU::V_FMA_F16_gfx9) :
        (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(NewOpc));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    unsigned CommuteOpNo = OpNo;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        CommuteOpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        CommuteOpNo = CommuteIdx0;
    }

    // One of operands might be an Imm operand, and OpNo may refer to it after
    // the call of commuteInstruction() below. Such situations are avoided
    // here explicitly as OpNo must be a register operand to be a candidate
    // for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) {
      if ((Opc == AMDGPU::V_ADD_I32_e64 ||
           Opc == AMDGPU::V_SUB_I32_e64 ||
           Opc == AMDGPU::V_SUBREV_I32_e64) && // FIXME
          (OpToFold->isImm() || OpToFold->isFI() || OpToFold->isGlobal())) {
        MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

        // Verify the other operand is a VGPR, otherwise we would violate the
        // constant bus restriction.
        unsigned OtherIdx = CommuteOpNo == CommuteIdx0 ? CommuteIdx1 : CommuteIdx0;
        MachineOperand &OtherOp = MI->getOperand(OtherIdx);
        if (!OtherOp.isReg() ||
            !TII->getRegisterInfo().isVGPR(MRI, OtherOp.getReg()))
          return false;

        assert(MI->getOperand(1).isDef());

        // Make sure to get the 32-bit version of the commuted opcode.
        unsigned MaybeCommutedOpc = MI->getOpcode();
        int Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc);

        appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true, Op32);
        return true;
      }

      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    appendFoldCandidate(FoldList, MI, CommuteOpNo, OpToFold, true);
    return true;
  }

  // Check the case where we might introduce a second constant operand to a
  // scalar instruction
  if (TII->isSALU(MI->getOpcode())) {
    const MCInstrDesc &InstDesc = MI->getDesc();
    const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];
    const SIRegisterInfo &SRI = TII->getRegisterInfo();

    // Fine if the operand can be encoded as an inline constant
    if (OpToFold->isImm()) {
      if (!SRI.opCanUseInlineConstant(OpInfo.OperandType) ||
          !TII->isInlineConstant(*OpToFold, OpInfo)) {
        // Otherwise check for another constant
        for (unsigned i = 0, e = InstDesc.getNumOperands(); i != e; ++i) {
          auto &Op = MI->getOperand(i);
          if (OpNo != i &&
              TII->isLiteralConstantLike(Op, OpInfo))
            return false;
        }
      }
    }
  }

  appendFoldCandidate(FoldList, MI, OpNo, OpToFold);
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

// Find a def of the UseReg, check if it is a reg_sequence and find initializers
// for each subreg, tracking it to a foldable inline immediate if possible.
// Returns true on success.
static bool getRegSeqInit(
    SmallVectorImpl<std::pair<MachineOperand*, unsigned>> &Defs,
    Register UseReg, uint8_t OpTy,
    const SIInstrInfo *TII, const MachineRegisterInfo &MRI) {
  MachineInstr *Def = MRI.getUniqueVRegDef(UseReg);
  if (!Def || !Def->isRegSequence())
    return false;

  for (unsigned I = 1, E = Def->getNumExplicitOperands(); I < E; I += 2) {
    MachineOperand *Sub = &Def->getOperand(I);
    assert(Sub->isReg());

    for (MachineInstr *SubDef = MRI.getUniqueVRegDef(Sub->getReg());
         SubDef && Sub->isReg() && !Sub->getSubReg() &&
         TII->isFoldableCopy(*SubDef);
         SubDef = MRI.getUniqueVRegDef(Sub->getReg())) {
      MachineOperand *Op = &SubDef->getOperand(1);
      if (Op->isImm()) {
        if (TII->isInlineConstant(*Op, OpTy))
          Sub = Op;
        break;
      }
      if (!Op->isReg())
        break;
      Sub = Op;
    }

    Defs.push_back(std::make_pair(Sub, Def->getOperand(I + 1).getImm()));
  }

  return true;
}

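// Try to fold an immediate into an accumulator (OPERAND_REG_INLINE_AC_*)
// operand, either directly or by looking through a reg_sequence whose
// initializers are identical (splat) inline constants.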
static bool tryToFoldACImm(const SIInstrInfo *TII,
                           const MachineOperand &OpToFold,
                           MachineInstr *UseMI,
                           unsigned UseOpIdx,
                           SmallVectorImpl<FoldCandidate> &FoldList) {
  const MCInstrDesc &Desc = UseMI->getDesc();
  const MCOperandInfo *OpInfo = Desc.OpInfo;
  if (!OpInfo || UseOpIdx >= Desc.getNumOperands())
    return false;

  uint8_t OpTy = OpInfo[UseOpIdx].OperandType;
  if (OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST ||
      OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return false;

  if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
      TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
    UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
    return true;
  }

  if (!OpToFold.isReg())
    return false;

  Register UseReg = OpToFold.getReg();
  if (!Register::isVirtualRegister(UseReg))
    return false;

  if (llvm::find_if(FoldList, [UseMI](const FoldCandidate &FC) {
        return FC.UseMI == UseMI; }) != FoldList.end())
    return false;

  MachineRegisterInfo &MRI = UseMI->getParent()->getParent()->getRegInfo();
  SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
  if (!getRegSeqInit(Defs, UseReg, OpTy, TII, MRI))
    return false;

  int32_t Imm;
  for (unsigned I = 0, E = Defs.size(); I != E; ++I) {
    const MachineOperand *Op = Defs[I].first;
    if (!Op->isImm())
      return false;

    auto SubImm = Op->getImm();
    if (!I) {
      Imm = SubImm;
      if (!TII->isInlineConstant(*Op, OpTy) ||
          !TII->isOperandLegal(*UseMI, UseOpIdx, Op))
        return false;

      continue;
    }
    if (Imm != SubImm)
      return false; // Can only fold splat constants
  }

  appendFoldCandidate(FoldList, UseMI, UseOpIdx, Defs[0].first);
  return true;
}

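// Fold OpToFold into the use at (UseMI, UseOpIdx), queueing any resulting
// candidates on FoldList. Handles the special cases (REG_SEQUENCE users,
// scratch frame indices, immediates folded into copies, AGPR copies, and
// v_readfirstlane/v_readlane of a constant or SGPR) before falling back to
// tryAddToFoldList.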
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  int UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    Register RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    MachineRegisterInfo::use_iterator Next;
    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; RSUse = Next) {
      Next = std::next(RSUse);

      MachineInstr *RSUseMI = RSUse->getParent();

      if (tryToFoldACImm(TII, UseMI->getOperand(0), RSUseMI,
                         RSUse.getOperandNo(), FoldList))
        continue;

      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  if (tryToFoldACImm(TII, OpToFold, UseMI, UseOpIdx, FoldList))
    return;

  if (frameIndexMayFold(TII, *UseMI, UseOpIdx, OpToFold)) {
    // Sanity check that this is a stack access.
    // FIXME: Should probably use stack pseudos before frame lowering.

    if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() !=
        MFI->getScratchRSrcReg())
      return;

    // Ensure this is either relative to the current frame or the current wave.
    MachineOperand &SOff =
      *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset);
    if ((!SOff.isReg() || SOff.getReg() != MFI->getStackPtrOffsetReg()) &&
        (!SOff.isImm() || SOff.getImm() != 0))
      return;

    // A frame index will resolve to a positive constant, so it should always be
    // safe to fold the addressing mode, even pre-GFX9.
    UseMI->getOperand(UseOpIdx).ChangeToFrameIndex(OpToFold.getIndex());

    // If this is relative to the current wave, update it to be relative to the
    // current frame.
    if (SOff.isImm())
      SOff.ChangeToRegister(MFI->getStackPtrOffsetReg(), false);
    return;
  }

  bool FoldingImmLike =
      OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

  if (FoldingImmLike && UseMI->isCopy()) {
    Register DestReg = UseMI->getOperand(0).getReg();

    // Don't fold into a copy to a physical register. Doing so would interfere
    // with the register coalescer's logic which would avoid redundant
    // initializations.
    if (DestReg.isPhysical())
      return;

    const TargetRegisterClass *DestRC = MRI->getRegClass(DestReg);

    Register SrcReg = UseMI->getOperand(1).getReg();
    if (SrcReg.isVirtual()) { // XXX - This can be an assert?
      const TargetRegisterClass * SrcRC = MRI->getRegClass(SrcReg);
      if (TRI->isSGPRClass(SrcRC) && TRI->hasVectorRegisters(DestRC)) {
        MachineRegisterInfo::use_iterator NextUse;
        SmallVector<FoldCandidate, 4> CopyUses;
        for (MachineRegisterInfo::use_iterator
               Use = MRI->use_begin(DestReg), E = MRI->use_end();
             Use != E; Use = NextUse) {
          NextUse = std::next(Use);
          FoldCandidate FC = FoldCandidate(Use->getParent(),
                                           Use.getOperandNo(), &UseMI->getOperand(1));
          CopyUses.push_back(FC);
        }
        for (auto & F : CopyUses) {
          foldOperand(*F.OpToFold, F.UseMI, F.UseOpNo,
                      FoldList, CopiesToReplace);
        }
      }
    }

    if (DestRC == &AMDGPU::AGPR_32RegClass &&
        TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
      UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
      CopiesToReplace.push_back(UseMI);
      return;
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    MachineInstr::mop_iterator ImpOpI = UseMI->implicit_operands().begin();
    MachineInstr::mop_iterator ImpOpE = UseMI->implicit_operands().end();
    while (ImpOpI != ImpOpE) {
      MachineInstr::mop_iterator Tmp = ImpOpI;
      ImpOpI++;
      UseMI->RemoveOperand(UseMI->getOperandNo(Tmp));
    }
    CopiesToReplace.push_back(UseMI);
  } else {
    if (UseMI->isCopy() && OpToFold.isReg() &&
        UseMI->getOperand(0).getReg().isVirtual() &&
        !UseMI->getOperand(1).getSubReg()) {
      LLVM_DEBUG(dbgs() << "Folding " << OpToFold
                        << "\n into " << *UseMI << '\n');
      unsigned Size = TII->getOpSize(*UseMI, 1);
      Register UseReg = OpToFold.getReg();
      UseMI->getOperand(1).setReg(UseReg);
      UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
      UseMI->getOperand(1).setIsKill(false);
      CopiesToReplace.push_back(UseMI);
      OpToFold.setIsKill(false);

      // It is very tricky to store a value into an AGPR. v_accvgpr_write_b32
      // can only accept VGPR or inline immediate. Recreate a reg_sequence with
      // its initializers right here, so we will rematerialize immediates and
      // avoid copies via different reg classes.
      SmallVector<std::pair<MachineOperand*, unsigned>, 32> Defs;
      if (Size > 4 && TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32, TII,
                        *MRI)) {
        const DebugLoc &DL = UseMI->getDebugLoc();
        MachineBasicBlock &MBB = *UseMI->getParent();

        UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE));
        for (unsigned I = UseMI->getNumOperands() - 1; I > 0; --I)
          UseMI->RemoveOperand(I);

        MachineInstrBuilder B(*MBB.getParent(), UseMI);
        DenseMap<TargetInstrInfo::RegSubRegPair, Register> VGPRCopies;
        SmallSetVector<TargetInstrInfo::RegSubRegPair, 32> SeenAGPRs;
        for (unsigned I = 0; I < Size / 4; ++I) {
          MachineOperand *Def = Defs[I].first;
          TargetInstrInfo::RegSubRegPair CopyToVGPR;
          if (Def->isImm() &&
              TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) {
            int64_t Imm = Def->getImm();

            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addImm(Imm);
            B.addReg(Tmp);
          } else if (Def->isReg() && TRI->isAGPR(*MRI, Def->getReg())) {
            auto Src = getRegSubRegPair(*Def);
            Def->setIsKill(false);
            if (!SeenAGPRs.insert(Src)) {
              // We cannot build a reg_sequence out of the same registers, they
              // must be copied. Better do it here before copyPhysReg() created
              // several reads to do the AGPR->VGPR->AGPR copy.
              CopyToVGPR = Src;
            } else {
              B.addReg(Src.Reg, Def->isUndef() ? RegState::Undef : 0,
                       Src.SubReg);
            }
          } else {
            assert(Def->isReg());
            Def->setIsKill(false);
            auto Src = getRegSubRegPair(*Def);

            // Direct copy from SGPR to AGPR is not possible. To avoid creation
            // of exploded copies SGPR->VGPR->AGPR in the copyPhysReg() later,
            // create a copy here and track if we already have such a copy.
            if (TRI->isSGPRReg(*MRI, Src.Reg)) {
              CopyToVGPR = Src;
            } else {
              auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def);
              B.addReg(Tmp);
            }
          }

          if (CopyToVGPR.Reg) {
            Register Vgpr;
            if (VGPRCopies.count(CopyToVGPR)) {
              Vgpr = VGPRCopies[CopyToVGPR];
            } else {
              Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
              BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def);
              VGPRCopies[CopyToVGPR] = Vgpr;
            }
            auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass);
            BuildMI(MBB, UseMI, DL,
                    TII->get(AMDGPU::V_ACCVGPR_WRITE_B32), Tmp).addReg(Vgpr);
            B.addReg(Tmp);
          }

          B.addImm(Defs[I].second);
        }
        LLVM_DEBUG(dbgs() << "Folded " << *UseMI << '\n');
        return;
      }

      if (Size != 4)
        return;
      if (TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg()) &&
          TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32));
      else if (TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
               TRI->isAGPR(*MRI, UseMI->getOperand(1).getReg()))
        UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32));
      return;
    }

    unsigned UseOpc = UseMI->getOpcode();
    if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 ||
        (UseOpc == AMDGPU::V_READLANE_B32 &&
         (int)UseOpIdx ==
         AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) {
      // %vgpr = V_MOV_B32 imm
      // %sgpr = V_READFIRSTLANE_B32 %vgpr
      // =>
      // %sgpr = S_MOV_B32 imm
      if (FoldingImmLike) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32));

        // FIXME: ChangeToImmediate should clear subreg
        UseMI->getOperand(1).setSubReg(0);
        if (OpToFold.isImm())
          UseMI->getOperand(1).ChangeToImmediate(OpToFold.getImm());
        else
          UseMI->getOperand(1).ChangeToFrameIndex(OpToFold.getIndex());
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }

      if (OpToFold.isReg() && TRI->isSGPRReg(*MRI, OpToFold.getReg())) {
        if (execMayBeModifiedBeforeUse(*MRI,
                                       UseMI->getOperand(UseOpIdx).getReg(),
                                       *OpToFold.getParent(),
                                       *UseMI))
          return;

        // %vgpr = COPY %sgpr0
        // %sgpr1 = V_READFIRSTLANE_B32 %vgpr
        // =>
        // %sgpr1 = COPY %sgpr0
        UseMI->setDesc(TII->get(AMDGPU::COPY));
        UseMI->getOperand(1).setReg(OpToFold.getReg());
        UseMI->getOperand(1).setSubReg(OpToFold.getSubReg());
        UseMI->getOperand(1).setIsKill(false);
        UseMI->RemoveOperand(2); // Remove exec read (or src1 for readlane)
        return;
      }
    }

    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseOp.isImplicit() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImmLike) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    Register UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

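// Constant-fold a 32-bit binary operation, returning the result in Result.
// Returns false for opcodes this helper does not know how to evaluate.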
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::S_XNOR_B32:
    Result = ~(LHS ^ RHS);
    return true;
  case AMDGPU::S_NAND_B32:
    Result = ~(LHS & RHS);
    return true;
  case AMDGPU::S_NOR_B32:
    Result = ~(LHS | RHS);
    return true;
  case AMDGPU::S_ANDN2_B32:
    Result = LHS & ~RHS;
    return true;
  case AMDGPU::S_ORN2_B32:
    Result = LHS | ~RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

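// Look through a foldable move to the materialized immediate, if any;
// otherwise return the operand itself.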
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister ||
        !Register::isVirtualRegister(Op.getReg()))
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  if (MI->getOpcode() == AMDGPU::V_LSHL_OR_B32 ||
      MI->getOpcode() == AMDGPU::V_LSHL_ADD_U32 ||
      MI->getOpcode() == AMDGPU::V_AND_OR_B32) {
    if (Src0->isImm() && Src0->getImm() == 0) {
      // v_lshl_or_b32 0, X, Y -> copy Y
      // v_lshl_or_b32 0, X, K -> v_mov_b32 K
      // v_lshl_add_b32 0, X, Y -> copy Y
      // v_lshl_add_b32 0, X, K -> v_mov_b32 K
      // v_and_or_b32 0, X, Y -> copy Y
      // v_and_or_b32 0, X, K -> v_mov_b32 K
      bool UseCopy = TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->isReg();
      MI->RemoveOperand(Src1Idx);
      MI->RemoveOperand(Src0Idx);

      MI->setDesc(TII->get(UseCopy ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32));
      return true;
    }
  }

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
    int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    if (Src1->isIdenticalTo(*Src0) &&
        (Src1ModIdx == -1 || !MI->getOperand(Src1ModIdx).getImm()) &&
        (Src0ModIdx == -1 || !MI->getOperand(Src0ModIdx).getImm())) {
      LLVM_DEBUG(dbgs() << "Folded " << *MI << " into ");
      auto &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      if (Src1ModIdx != -1)
        MI->RemoveOperand(Src1ModIdx);
      if (Src0ModIdx != -1)
        MI->RemoveOperand(Src0ModIdx);
      mutateCopyOp(*MI, NewDesc);
      LLVM_DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

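// Fold OpToFold (the source of the foldable def MI) into every use of MI's
// destination register: constant-fold users where possible, fold inline
// immediates into all uses, fold a non-inline literal only if it has a single
// use, then apply the collected fold candidates.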
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.
      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        LLVM_DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else if (frameIndexMayFold(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList,
                    CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    SmallVector <MachineRegisterInfo::use_iterator, 4> UsesToProcess;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      UsesToProcess.push_back(Use);
    }
    for (auto U : UsesToProcess) {
      MachineInstr *UseMI = U->getParent();

      foldOperand(OpToFold, UseMI, U.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    assert(!Fold.isReg() || Fold.OpToFold);
    if (Fold.isReg() && Register::isVirtualRegister(Fold.OpToFold->getReg())) {
      Register Reg = Fold.OpToFold->getReg();
      MachineInstr *DefMI = Fold.OpToFold->getParent();
      if (DefMI->readsRegister(AMDGPU::EXEC, TRI) &&
          execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *Fold.UseMI))
        continue;
    }
    if (updateOperand(Fold, *TII, *TRI, *ST)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                        << static_cast<int>(Fold.UseOpNo) << " of "
                        << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1
                                                      : 0u;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;
    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  LLVM_DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def
                    << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

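// Map a multiplication constant to the corresponding output modifier
// (div:2, mul:2 or mul:4), or SIOutMods::NONE if it is not one of them.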
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        (Op == AMDGPU::V_MUL_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && MFI->getMode().FP32OutputDenormals) ||
        (Op == AMDGPU::V_ADD_F16_e64 && MFI->getMode().FP64FP16OutputDenormals))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  LLVM_DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

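// Main pass entry point: walk blocks in depth-first order, erase redundant
// redefinitions of m0 to the same value, fold omod/clamp modifiers, and fold
// the sources of foldable copies into their uses.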
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // FIXME: Also need to check strictfp
  bool IsIEEEMode = MFI->getMode().IEEE;
  bool HasNSZ = MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;

    MachineOperand *CurrentKnownM0Val = nullptr;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        // Saw an unknown clobber of m0, so we no longer know what it is.
        if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI))
          CurrentKnownM0Val = nullptr;

        // TODO: Omod might be OK if there is NSZ only on the source
        // instruction, and not the omod multiply.
        if (IsIEEEMode || (!HasNSZ && !MI.getFlag(MachineInstr::FmNsz)) ||
            !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      // Specially track simple redefs of m0 to the same value in a block, so we
      // can erase the later ones.
      if (MI.getOperand(0).getReg() == AMDGPU::M0) {
        MachineOperand &NewM0Val = MI.getOperand(1);
        if (CurrentKnownM0Val && CurrentKnownM0Val->isIdenticalTo(NewM0Val)) {
          MI.eraseFromParent();
          continue;
        }

        // We aren't tracking other physical registers
        CurrentKnownM0Val = (NewM0Val.isReg() && NewM0Val.getReg().isPhysical()) ?
          nullptr : &NewM0Val;
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm =
          OpToFold.isImm() || OpToFold.isFI() || OpToFold.isGlobal();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() && !Register::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() && !Register::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return false;
}