//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
13 #include "AMDGPUSubtarget.h"
14 #include "SIInstrInfo.h"
15 #include "SIMachineFunctionInfo.h"
16 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
17 #include "llvm/CodeGen/MachineFunctionPass.h"
18 #include "llvm/CodeGen/MachineInstrBuilder.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Support/raw_ostream.h"
22 #include "llvm/Target/TargetMachine.h"
24 #define DEBUG_TYPE "si-fold-operands"
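
// Describes a pending fold: OpToFold (an immediate, frame index, or register)
// will be substituted into operand UseOpNo of UseMI by updateOperand().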
struct FoldCandidate {
  MachineInstr *UseMI;
  union {
    MachineOperand *OpToFold;
    uint64_t ImmToFold;
    int FrameIndexToFold;
  };
  unsigned char UseOpNo;
  MachineOperand::MachineOperandType Kind;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
    UseMI(MI), OpToFold(nullptr), UseOpNo(OpNo), Kind(FoldOp->getType()) {
    if (FoldOp->isImm()) {
      ImmToFold = FoldOp->getImm();
    } else if (FoldOp->isFI()) {
      FrameIndexToFold = FoldOp->getIndex();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isFI() const {
    return Kind == MachineOperand::MO_FrameIndex;
  }

  bool isImm() const {
    return Kind == MachineOperand::MO_Immediate;
  }

  bool isReg() const {
    return Kind == MachineOperand::MO_Register;
  }
};

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;
  MachineRegisterInfo *MRI;
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  const SISubtarget *ST;

  void foldOperand(MachineOperand &OpToFold,
                   MachineInstr *UseMI,
                   unsigned UseOpIdx,
                   SmallVectorImpl<FoldCandidate> &FoldList,
                   SmallVectorImpl<MachineInstr *> &CopiesToReplace) const;

  void foldInstOperand(MachineInstr &MI, MachineOperand &OpToFold) const;

  const MachineOperand *isClamp(const MachineInstr &MI) const;
  bool tryFoldClamp(MachineInstr &MI);

  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
  bool tryFoldOMod(MachineInstr &MI);

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Fold Operands"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

// Wrapper around isInlineConstant that understands special cases when
// instruction types are replaced during operand folding.
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
                                     const MachineInstr &UseMI,
                                     unsigned OpNo,
                                     const MachineOperand &OpToFold) {
  if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
    return true;

  unsigned Opc = UseMI.getOpcode();
  switch (Opc) {
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_MAC_F16_e64: {
    // Special case for mac. Since this is replaced with mad when folded into
    // src2, we need to check the legality for the final instruction.
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (static_cast<int>(OpNo) == Src2Idx) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
      const MCInstrDesc &MadDesc
        = TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
      return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
    }
    return false;
  }
  default:
    return false;
  }
}

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}

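// Apply a previously recorded fold, rewriting the use operand in place as an
// immediate, frame index, or virtual register. Returns false if the fold
// cannot be applied (currently, folds into physical register uses).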
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  if (Fold.isFI()) {
    Old.ChangeToFrameIndex(Fold.FrameIndexToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.
  return false;
}

static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
                              const MachineInstr *MI) {
  for (auto Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}

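// Record a fold of OpToFold into operand OpNo of MI, mutating the opcode
// (mac -> mad, s_setreg -> s_setreg_imm32) or commuting operands if that is
// what it takes to make the fold legal. Returns true if a candidate was
// added to FoldList.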
static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_{f16, f32}_e64 if we are trying to fold into src2
    unsigned Opc = MI->getOpcode();
    if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;

      // Check if changing this to a v_mad_{f16, f32} instruction will allow us
      // to fold the operand.
      MI->setDesc(TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // Special case for s_setreg_b32
    if (Opc == AMDGPU::S_SETREG_B32 && OpToFold->isImm()) {
      MI->setDesc(TII->get(AMDGPU::S_SETREG_IMM32_B32));
      FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
      return true;
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of the operands might be an Imm operand, and OpNo may refer to it
    // after the call of commuteInstruction() below. Such situations are
    // avoided here explicitly as OpNo must be a register operand to be a
    // candidate for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}

// If the use operand doesn't care about the value, this may be an operand only
// used for register indexing, in which case it is unsafe to fold.
static bool isUseSafeToFold(const MachineInstr &MI,
                            const MachineOperand &UseMO) {
  return !UseMO.isUndef();
  //return !MI.hasRegisterImplicitUseOperand(UseMO.getReg());
}

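// Try to fold OpToFold into the use at UseOpIdx of UseMI, recursing through
// REG_SEQUENCE uses and converting immediate-fed COPYs into MOVs.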
void SIFoldOperands::foldOperand(
  MachineOperand &OpToFold,
  MachineInstr *UseMI,
  unsigned UseOpIdx,
  SmallVectorImpl<FoldCandidate> &FoldList,
  SmallVectorImpl<MachineInstr *> &CopiesToReplace) const {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  if (!isUseSafeToFold(*UseMI, UseOp))
    return;

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && OpToFold.isReg()) {
    if (UseOp.isImplicit() || UseOp.getSubReg() != AMDGPU::NoSubRegister)
      return;

    // Don't fold subregister extracts into tied operands unless it is a full
    // copy, since a subregister use tied to a full register def doesn't really
    // make sense. e.g. don't fold:
    //
    // %vreg1 = COPY %vreg0:sub1
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg1<tied0>
    //
    // into
    // %vreg2<tied3> = V_MAC_{F16, F32} %vreg3, %vreg4, %vreg0:sub1<tied0>
    if (UseOp.isTied() && OpToFold.getSubReg() != AMDGPU::NoSubRegister)
      return;
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->isRegSequence()) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
           RSUse = MRI->use_begin(RegSeqDstReg), RSE = MRI->use_end();
         RSUse != RSE; ++RSUse) {
      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace);
    }

    return;
  }

  bool FoldingImm = OpToFold.isImm();

  // In order to fold immediates into copies, we need to change the
  // copy to a MOV.
  if (FoldingImm && UseMI->isCopy()) {
    unsigned DestReg = UseMI->getOperand(0).getReg();
    const TargetRegisterClass *DestRC
      = TargetRegisterInfo::isVirtualRegister(DestReg) ?
      MRI->getRegClass(DestReg) :
      TRI->getPhysRegClass(DestReg);

    unsigned MovOp = TII->getMovOpcode(DestRC);
    if (MovOp == AMDGPU::COPY)
      return;

    UseMI->setDesc(TII->get(MovOp));
    CopiesToReplace.push_back(UseMI);
  } else {
    const MCInstrDesc &UseDesc = UseMI->getDesc();

    // Don't fold into target independent nodes. Target independent opcodes
    // don't have defined register classes.
    if (UseDesc.isVariadic() ||
        UseDesc.OpInfo[UseOpIdx].RegClass == -1)
      return;
  }

  if (!FoldingImm) {
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

    // FIXME: We could try to change the instruction from 64-bit to 32-bit
    // to enable more folding opportunities. The shrink operands pass
    // already does this.
    return;
  }

  const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc();
  const TargetRegisterClass *FoldRC =
    TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);

  // Split 64-bit constants into 32-bits for folding.
  if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI->getRegClass(UseReg) :
      TRI->getPhysRegClass(UseReg);

    if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
      return;

    APInt Imm(64, OpToFold.getImm());
    if (UseOp.getSubReg() == AMDGPU::sub0) {
      Imm = Imm.getLoBits(32);
    } else {
      assert(UseOp.getSubReg() == AMDGPU::sub1);
      Imm = Imm.getHiBits(32);
    }

    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);
}

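// Constant-fold a 32-bit ALU opcode applied to two known constant operands,
// writing the value to Result. Returns true if the opcode is handled.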
static bool evalBinaryInstruction(unsigned Opcode, int32_t &Result,
                                  uint32_t LHS, uint32_t RHS) {
  switch (Opcode) {
  case AMDGPU::V_AND_B32_e64:
  case AMDGPU::V_AND_B32_e32:
  case AMDGPU::S_AND_B32:
    Result = LHS & RHS;
    return true;
  case AMDGPU::V_OR_B32_e64:
  case AMDGPU::V_OR_B32_e32:
  case AMDGPU::S_OR_B32:
    Result = LHS | RHS;
    return true;
  case AMDGPU::V_XOR_B32_e64:
  case AMDGPU::V_XOR_B32_e32:
  case AMDGPU::S_XOR_B32:
    Result = LHS ^ RHS;
    return true;
  case AMDGPU::V_LSHL_B32_e64:
  case AMDGPU::V_LSHL_B32_e32:
  case AMDGPU::S_LSHL_B32:
    // The instruction ignores the high bits for out of bounds shifts.
    Result = LHS << (RHS & 31);
    return true;
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32:
    Result = RHS << (LHS & 31);
    return true;
  case AMDGPU::V_LSHR_B32_e64:
  case AMDGPU::V_LSHR_B32_e32:
  case AMDGPU::S_LSHR_B32:
    Result = LHS >> (RHS & 31);
    return true;
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32:
    Result = RHS >> (LHS & 31);
    return true;
  case AMDGPU::V_ASHR_I32_e64:
  case AMDGPU::V_ASHR_I32_e32:
  case AMDGPU::S_ASHR_I32:
    Result = static_cast<int32_t>(LHS) >> (RHS & 31);
    return true;
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32:
    Result = static_cast<int32_t>(RHS) >> (LHS & 31);
    return true;
  default:
    return false;
  }
}

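// Select the 32-bit move opcode for rematerializing a folded constant into
// either a scalar or a vector register.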
static unsigned getMovOpc(bool IsScalar) {
  return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
}

/// Remove any leftover implicit operands from mutating the instruction. e.g.
/// if we replace an s_and_b32 with a copy, we don't need the implicit scc def
/// anymore.
static void stripExtraCopyOperands(MachineInstr &MI) {
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned NumOps = Desc.getNumOperands() +
                    Desc.getNumImplicitUses() +
                    Desc.getNumImplicitDefs();

  for (unsigned I = MI.getNumOperands() - 1; I >= NumOps; --I)
    MI.RemoveOperand(I);
}

static void mutateCopyOp(MachineInstr &MI, const MCInstrDesc &NewDesc) {
  MI.setDesc(NewDesc);
  stripExtraCopyOperands(MI);
}

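// Look through a register operand to the constant it was materialized from:
// if the operand's def is a move-immediate, return the immediate source
// operand of that def, otherwise return the operand itself.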
static MachineOperand *getImmOrMaterializedImm(MachineRegisterInfo &MRI,
                                               MachineOperand &Op) {
  if (Op.isReg()) {
    // If this has a subregister, it obviously is a register source.
    if (Op.getSubReg() != AMDGPU::NoSubRegister)
      return &Op;

    MachineInstr *Def = MRI.getVRegDef(Op.getReg());
    if (Def->isMoveImmediate()) {
      MachineOperand &ImmSrc = Def->getOperand(1);
      if (ImmSrc.isImm())
        return &ImmSrc;
    }
  }

  return &Op;
}

// Try to simplify operations with a constant that may appear after instruction
// selection.
// TODO: See if a frame index with a fixed offset can fold.
static bool tryConstantFoldOp(MachineRegisterInfo &MRI,
                              const SIInstrInfo *TII,
                              MachineInstr *MI,
                              MachineOperand *ImmOp) {
  unsigned Opc = MI->getOpcode();
  if (Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 ||
      Opc == AMDGPU::S_NOT_B32) {
    MI->getOperand(1).ChangeToImmediate(~ImmOp->getImm());
    mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32)));
    return true;
  }

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand *Src0 = getImmOrMaterializedImm(MRI, MI->getOperand(Src0Idx));
  MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));

  if (!Src0->isImm() && !Src1->isImm())
    return false;

  // and k0, k1 -> v_mov_b32 (k0 & k1)
  // or k0, k1 -> v_mov_b32 (k0 | k1)
  // xor k0, k1 -> v_mov_b32 (k0 ^ k1)
  if (Src0->isImm() && Src1->isImm()) {
    int32_t NewImm;
    if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
      return false;

    const SIRegisterInfo &TRI = TII->getRegisterInfo();
    bool IsSGPR = TRI.isSGPRReg(MRI, MI->getOperand(0).getReg());

    // Be careful to change the right operand, src0 may belong to a different
    // instruction.
    MI->getOperand(Src0Idx).ChangeToImmediate(NewImm);
    MI->RemoveOperand(Src1Idx);
    mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR)));
    return true;
  }

  if (!MI->isCommutable())
    return false;

  if (Src0->isImm() && !Src1->isImm()) {
    std::swap(Src0, Src1);
    std::swap(Src0Idx, Src1Idx);
  }

  int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
  if (Opc == AMDGPU::V_OR_B32_e64 ||
      Opc == AMDGPU::V_OR_B32_e32 ||
      Opc == AMDGPU::S_OR_B32) {
    if (Src1Val == 0) {
      // y = or x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
    } else if (Src1Val == -1) {
      // y = or x, -1 => y = v_mov_b32 -1
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32)));
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_AND_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_AND_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_AND_B32) {
    if (Src1Val == 0) {
      // y = and x, 0 => y = v_mov_b32 0
      MI->RemoveOperand(Src0Idx);
      mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32)));
    } else if (Src1Val == -1) {
      // y = and x, -1 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      stripExtraCopyOperands(*MI);
    } else
      return false;

    return true;
  }

  if (MI->getOpcode() == AMDGPU::V_XOR_B32_e64 ||
      MI->getOpcode() == AMDGPU::V_XOR_B32_e32 ||
      MI->getOpcode() == AMDGPU::S_XOR_B32) {
    if (Src1Val == 0) {
      // y = xor x, 0 => y = copy x
      MI->RemoveOperand(Src1Idx);
      mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      return true;
    }
  }

  return false;
}

// Try to fold an instruction into a simpler one, e.g. a cndmask with
// identical sources into a copy or move.
static bool tryFoldInst(const SIInstrInfo *TII,
                        MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();

  if (Opc == AMDGPU::V_CNDMASK_B32_e32 ||
      Opc == AMDGPU::V_CNDMASK_B32_e64 ||
      Opc == AMDGPU::V_CNDMASK_B64_PSEUDO) {
    const MachineOperand *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);
    if (Src1->isIdenticalTo(*Src0)) {
      DEBUG(dbgs() << "Folded " << *MI << " into ");
      int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
      if (Src2Idx != -1)
        MI->RemoveOperand(Src2Idx);
      MI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1));
      mutateCopyOp(*MI, TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY
                                               : getMovOpc(false)));
      DEBUG(dbgs() << *MI << '\n');
      return true;
    }
  }

  return false;
}

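// Fold the source operand of the copy-like instruction MI into every use of
// MI's destination register, constant-folding or simplifying uses as they are
// visited, then apply the accumulated fold list.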
void SIFoldOperands::foldInstOperand(MachineInstr &MI,
                                     MachineOperand &OpToFold) const {
  // We need to mutate the operands of new mov instructions to add implicit
  // uses of EXEC, but adding them invalidates the use_iterator, so defer
  // this until the end.
  SmallVector<MachineInstr *, 4> CopiesToReplace;
  SmallVector<FoldCandidate, 4> FoldList;
  MachineOperand &Dst = MI.getOperand(0);

  bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();
  if (FoldingImm) {
    unsigned NumLiteralUses = 0;
    MachineOperand *NonInlineUse = nullptr;
    int NonInlineUseOpNo = -1;

    MachineRegisterInfo::use_iterator NextUse;
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; Use = NextUse) {
      NextUse = std::next(Use);
      MachineInstr *UseMI = Use->getParent();
      unsigned OpNo = Use.getOperandNo();

      // Folding the immediate may reveal operations that can be constant
      // folded or replaced with a copy. This can happen for example after
      // frame indices are lowered to constants or from splitting 64-bit
      // constants.
      //
      // We may also encounter cases where one or both operands are
      // immediates materialized into a register, which would ordinarily not
      // be folded due to multiple uses or operand constraints.
      if (OpToFold.isImm() && tryConstantFoldOp(*MRI, TII, UseMI, &OpToFold)) {
        DEBUG(dbgs() << "Constant folded " << *UseMI << '\n');

        // Some constant folding cases change the same immediate's use to a new
        // instruction, e.g. and x, 0 -> 0. Make sure we re-visit the user
        // again. The same constant folded instruction could also have a second
        // use operand.
        NextUse = MRI->use_begin(Dst.getReg());
        FoldList.clear();
        continue;
      }

      // Try to fold any inline immediate uses, and then only fold other
      // constants if they have one use.
      //
      // The legality of the inline immediate must be checked based on the use
      // operand, not the defining instruction, because 32-bit instructions
      // with 32-bit inline immediate sources may be used to materialize
      // constants used in 16-bit operands.
      //
      // e.g. it is unsafe to fold:
      //  s_mov_b32 s0, 1.0    // materializes 0x3f800000
      //  v_add_f16 v0, v1, s0 // 1.0 f16 inline immediate sees 0x00003c00
      //
      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
        foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
      } else {
        if (++NumLiteralUses == 1) {
          NonInlineUse = &*Use;
          NonInlineUseOpNo = OpNo;
        }
      }
    }

    if (NumLiteralUses == 1) {
      MachineInstr *UseMI = NonInlineUse->getParent();
      foldOperand(OpToFold, UseMI, NonInlineUseOpNo, FoldList, CopiesToReplace);
    }
  } else {
    // Folding a register.
    for (MachineRegisterInfo::use_iterator
           Use = MRI->use_begin(Dst.getReg()), E = MRI->use_end();
         Use != E; ++Use) {
      MachineInstr *UseMI = Use->getParent();

      foldOperand(OpToFold, UseMI, Use.getOperandNo(),
                  FoldList, CopiesToReplace);
    }
  }

  MachineFunction *MF = MI.getParent()->getParent();
  // Make sure we add EXEC uses to any new v_mov instructions created.
  for (MachineInstr *Copy : CopiesToReplace)
    Copy->addImplicitDefUseOperands(*MF);

  for (FoldCandidate &Fold : FoldList) {
    if (updateOperand(Fold, *TRI)) {
      // Clear kill flags.
      if (Fold.isReg()) {
        assert(Fold.OpToFold && Fold.OpToFold->isReg());
        // FIXME: Probably shouldn't bother trying to fold if not an
        // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
        // copies.
        MRI->clearKillFlags(Fold.OpToFold->getReg());
      }
      DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
            static_cast<int>(Fold.UseOpNo) << " of " << *Fold.UseMI << '\n');
      tryFoldInst(TII, Fold.UseMI);
    }
  }
}

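// Recognize a clamp: a v_max with the clamp bit set whose two sources are the
// same register, and return that source operand.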
const MachineOperand *SIFoldOperands::isClamp(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MAX_F32_e64:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F64: {
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;

    // Make sure sources are identical.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isReg() || !Src1->isReg() ||
        Src0->getReg() != Src1->getReg() ||
        Src0->getSubReg() != Src1->getSubReg() ||
        Src0->getSubReg() != AMDGPU::NoSubRegister)
      return nullptr;

    // Can't fold up if we have modifiers.
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;

    return Src0;
  }
  default:
    return nullptr;
  }
}

// We obviously have multiple uses in a clamp since the register is used twice
// in the same instruction.
static bool hasOneNonDBGUseInst(const MachineRegisterInfo &MRI, unsigned Reg) {
  int Count = 0;
  for (auto I = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
       I != E; ++I) {
    if (++Count > 1)
      return false;
  }

  return true;
}

bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
  const MachineOperand *ClampSrc = isClamp(MI);
  if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(ClampSrc->getReg());
  if (!TII->hasFPClamp(*Def))
    return false;

  MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp);
  if (!DefClamp)
    return false;

  DEBUG(dbgs() << "Folding clamp " << *DefClamp << " into " << *Def << '\n');

  // Clamp is applied after omod, so it is OK if omod is set.
  DefClamp->setImm(1);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

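// Map a multiplier immediate (0.5, 2.0 or 4.0) onto the omod output-modifier
// encoding for the given multiply opcode.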
static int getOModValue(unsigned Opc, int64_t Val) {
  switch (Opc) {
  case AMDGPU::V_MUL_F32_e64: {
    switch (static_cast<uint32_t>(Val)) {
    case 0x3f000000: // 0.5
      return SIOutMods::DIV2;
    case 0x40000000: // 2.0
      return SIOutMods::MUL2;
    case 0x40800000: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  case AMDGPU::V_MUL_F16_e64: {
    switch (static_cast<uint16_t>(Val)) {
    case 0x3800: // 0.5
      return SIOutMods::DIV2;
    case 0x4000: // 2.0
      return SIOutMods::MUL2;
    case 0x4400: // 4.0
      return SIOutMods::MUL4;
    default:
      return SIOutMods::NONE;
    }
  }
  default:
    llvm_unreachable("invalid mul opcode");
  }
}

// FIXME: Does this really not support denormals with f16?
// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
// handled, so will anything other than that break?
std::pair<const MachineOperand *, int>
SIFoldOperands::isOMod(const MachineInstr &MI) const {
  unsigned Op = MI.getOpcode();
  switch (Op) {
  case AMDGPU::V_MUL_F32_e64:
  case AMDGPU::V_MUL_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_MUL_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_MUL_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    const MachineOperand *RegOp = nullptr;
    const MachineOperand *ImmOp = nullptr;
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isImm()) {
      ImmOp = Src0;
      RegOp = Src1;
    } else if (Src1->isImm()) {
      ImmOp = Src1;
      RegOp = Src0;
    } else
      return std::make_pair(nullptr, SIOutMods::NONE);

    int OMod = getOModValue(Op, ImmOp->getImm());
    if (OMod == SIOutMods::NONE ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
      return std::make_pair(nullptr, SIOutMods::NONE);

    return std::make_pair(RegOp, OMod);
  }
  case AMDGPU::V_ADD_F32_e64:
  case AMDGPU::V_ADD_F16_e64: {
    // If output denormals are enabled, omod is ignored.
    if ((Op == AMDGPU::V_ADD_F32_e64 && ST->hasFP32Denormals()) ||
        (Op == AMDGPU::V_ADD_F16_e64 && ST->hasFP16Denormals()))
      return std::make_pair(nullptr, SIOutMods::NONE);

    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x.
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);

    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
        Src0->getSubReg() == Src1->getSubReg() &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return std::make_pair(Src0, SIOutMods::MUL2);

    return std::make_pair(nullptr, SIOutMods::NONE);
  }
  default:
    return std::make_pair(nullptr, SIOutMods::NONE);
  }
}

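// Try to absorb an output-modifier multiply or add into the instruction that
// defines its source, setting the omod field there instead.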
// FIXME: Does this need to check IEEE bit on function?
bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
  const MachineOperand *RegOp;
  int OMod;
  std::tie(RegOp, OMod) = isOMod(MI);
  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
    return false;

  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
    return false;

  // Clamp is applied after omod. If the source already has clamp set, don't
  // fold it.
  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
    return false;

  DEBUG(dbgs() << "Folding omod " << MI << " into " << *Def << '\n');

  DefOMod->setImm(OMod);
  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
  MI.eraseFromParent();
  return true;
}

bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<SISubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // omod is ignored by hardware if IEEE bit is enabled. omod also does not
  // correctly handle signed zeros.
  //
  // TODO: Check nsz on instructions when fast math flags are preserved to MI
  // level.
  bool IsIEEEMode = ST->enableIEEEBit(MF) || !MFI->hasNoSignedZerosFPMath();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      tryFoldInst(TII, &MI);

      if (!TII->isFoldableCopy(MI)) {
        if (IsIEEEMode || !tryFoldOMod(MI))
          tryFoldClamp(MI);
        continue;
      }

      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm() || OpToFold.isFI();

      // FIXME: We could also be folding things like TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //    ...
      //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      foldInstOperand(MI, OpToFold);
    }
  }
  return true;
}