//===-- SIFoldOperands.cpp - Fold operands ----------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"
using namespace llvm;

namespace {

struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    int64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;
  bool Commuted;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp,
                bool Commuted_ = false) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()),
    Commuted(Commuted_) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const { return Kind == MachineOperand::MO_FrameIndex; }
  bool isImm() const { return Kind == MachineOperand::MO_Immediate; }
  bool isReg() const { return Kind == MachineOperand::MO_Register; }
  bool isCommuted() const { return Commuted; }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const SISubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      const MCInstrDesc &MadDesc
        = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

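// Apply a queued fold: rewrite the recorded use operand in place to the folded
// immediate, frame index, or source register.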
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    Old.setIsUndef(New->isUndef());
    return true;
  }

  // FIXME: Handle physical registers.
  return false;
}

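// Return true if MI already appears as the user of some pending fold in
// FoldList.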
static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

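// Try to queue folding OpToFold into operand OpNo of MI. If the fold is not
// immediately legal, this may rewrite v_mac to v_mad, switch s_setreg_b32 to
// its immediate form, or commute MI to make the fold possible.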
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of operands might be an Imm operand, and OpNo may refer to it after
    // the call of commuteInstruction() below. Such situations are avoided
    // here explicitly as OpNo must be a register operand to be a candidate
    // for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {
      TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1);
      return false;
    }

    FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold, true));
    return true;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const SIInstrInfo *TII,
                            const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef() && !TII->isSDWA(MI);

  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

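// Try to fold OpToFold into the use at (UseMI, UseOpIdx). Successful
// candidates are appended to FoldList; copies that must become movs are
// recorded in CopiesToReplace.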
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(TII, *UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands, only if it is a full
    // copy since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %1 = COPY %0:sub1
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %1<tied0>
    //
    //  into
    // %2<tied3> = V_MAC_{F16, F32} %3, %4, %0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

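// Evaluate a 32-bit bitwise or shift operation on two constant operands,
// returning the value in Result. Returns false for opcodes that are not
// handled here.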
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

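// Rewrite MI to the given descriptor (e.g. a plain COPY or mov) and drop any
// implicit operands the new opcode no longer needs.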
static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

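// Look through a register operand to the immediate materialized by its
// defining move, if any; otherwise return the operand unchanged.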
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister)
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

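// Fold the source operand of a foldable copy/mov (MI) into all uses of its
// destination register, e.g.
//
//   %0 = V_MOV_B32_e32 7, implicit %exec
//   %1 = V_AND_B32_e32 %0, %2
//
// becomes a v_and of the inline constant 7, which tryConstantFoldOp may then
// simplify further. Collected folds are applied at the end.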
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.
      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    } else if (Fold.isCommuted()) {
      // Restore the instruction's original operand order if the fold failed.
      TII->commuteInstruction(*Fold.UseMI, false);
    }
  }
}

// Clamp patterns are canonically selected to v_max_* instructions, so only
// handle them.
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64:
  case AMDGPU::V_PK_MAX_F16: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    unsigned Src0Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm();
    unsigned Src1Mods
      = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm();

    // Having a 0 op_sel_hi would require swizzling the output in the source
    // instruction, which we can't do.
    unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1 : 0;
    if (Src0Mods != UnsetMods && Src1Mods != UnsetMods)
      return nullptr;

    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

// FIXME: Clamp for v_mad_mixhi_f16 handled during isel.
bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());

  // The type of clamp must be compatible.
  if (TII->getClampMask(*Def) != TII->getClampMask(MI))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

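// Map a multiplier constant to its output-modifier encoding:
// 0.5 -> DIV2, 2.0 -> MUL2, 4.0 -> MUL4; anything else -> NONE.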
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

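// Try to fold a multiply or add of a register with itself or with an
// omod-encodable constant into the output modifier of the defining
// instruction.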
// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

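// Visit blocks in depth-first order, folding the sources of foldable copies
// and movs into their uses, and applying clamp/omod folds to other
// instructions.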
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // TODO: Check nsz on instructions when fast math flags are preserved to MI
  // level.
  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();

  for (MachineBasicBlock *MBB : depth_first(&MF)) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB->begin(); I != MBB->end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        if (IsIEEEMode || !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %3 = COPY %vgpr0; VGPR_32:%3
      //    ...
      //    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return true;
}