//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers all occurrences of i1 values (with a vreg_1 register class)
// to lane masks (32 / 64-bit scalar registers). The pass assumes machine SSA
// form and a wave-level control flow graph.
//
// Before this pass, values that are semantically i1 and are defined and used
// within the same basic block are already represented as lane masks in scalar
// registers. However, values that cross basic blocks are always transferred
// between basic blocks in vreg_1 virtual registers and are lowered by this
// pass.
//
// The only instructions that use or define vreg_1 virtual registers are COPY,
// PHI, and IMPLICIT_DEF.
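//
// Whenever a vreg_1 value has to be merged across a divergent branch, the
// lowering combines the previous and current lane masks as
//   DstMask = (PrevMask & ~EXEC) | (CurMask & EXEC)
// so that only the lanes active in the defining block are updated (see
// buildMergeLaneMasks below).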
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-i1-copies"

using namespace llvm;

static unsigned createLaneMaskReg(MachineFunction &MF);
static unsigned insertUndefLaneMask(MachineBasicBlock &MBB);

namespace {

class SILowerI1Copies : public MachineFunctionPass {
public:
  static char ID;

private:
  bool IsWave32 = false;
  MachineFunction *MF = nullptr;
  MachineDominatorTree *DT = nullptr;
  MachinePostDominatorTree *PDT = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;

  unsigned ExecReg;
  unsigned MovOp;
  unsigned AndOp;
  unsigned OrOp;
  unsigned XorOp;
  unsigned AndN2Op;
  unsigned OrN2Op;

  DenseSet<unsigned> ConstrainRegs;

public:
  SILowerI1Copies() : MachineFunctionPass(ID) {
    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Lower i1 Copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachinePostDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  void lowerCopiesFromI1();
  void lowerPhis();
  void lowerCopiesToI1();
  bool isConstantLaneMask(unsigned Reg, bool &Val) const;
  void buildMergeLaneMasks(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           unsigned DstReg, unsigned PrevReg, unsigned CurReg);

  MachineBasicBlock::iterator
  getSaluInsertionAtEnd(MachineBasicBlock &MBB) const;

  bool isVreg1(unsigned Reg) const {
    return Register::isVirtualRegister(Reg) &&
           MRI->getRegClass(Reg) == &AMDGPU::VReg_1RegClass;
  }

  bool isLaneMaskReg(unsigned Reg) const {
    return TII->getRegisterInfo().isSGPRReg(*MRI, Reg) &&
           TII->getRegisterInfo().getRegSizeInBits(Reg, *MRI) ==
               ST->getWavefrontSize();
  }
};

/// Helper class that determines the relationship between incoming values of a
/// phi in the control flow graph to determine where an incoming value can
/// simply be taken as a scalar lane mask as-is, and where it needs to be
/// merged with another, previously defined lane mask.
///
/// The approach is as follows:
///  - Determine all basic blocks which, starting from the incoming blocks,
///    a wave may reach before entering the def block (the block containing the
///    phi).
///  - If an incoming block has no predecessors in this set, we can take the
///    incoming value as a scalar lane mask as-is.
///  -- A special case of this is when the def block has a self-loop.
///  - Otherwise, the incoming value needs to be merged with a previously
///    defined lane mask.
///  - If there is a path into the set of reachable blocks that does _not_ go
///    through an incoming block where we can take the scalar lane mask as-is,
///    we need to invent an available value for the SSAUpdater. Choices are
///    0 and undef, with differing consequences for how to merge values etc.
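///
/// For example, when an incoming block ends in a divergent branch and the def
/// block is its post-dominator, the wave may reach the def block through the
/// branch's other successor first, so that incoming value has to be merged
/// rather than taken as-is.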
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
///       the traversal.
class PhiIncomingAnalysis {
  MachinePostDominatorTree &PDT;

  // For each reachable basic block, whether it is a source in the induced
  // subgraph of the CFG.
  DenseMap<MachineBasicBlock *, bool> ReachableMap;
  SmallVector<MachineBasicBlock *, 4> ReachableOrdered;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> Predecessors;

public:
  PhiIncomingAnalysis(MachinePostDominatorTree &PDT) : PDT(PDT) {}

  /// Returns whether \p MBB is a source in the induced subgraph of reachable
  /// blocks.
  bool isSource(MachineBasicBlock &MBB) const {
    return ReachableMap.find(&MBB)->second;
  }

  ArrayRef<MachineBasicBlock *> predecessors() const { return Predecessors; }

  void analyze(MachineBasicBlock &DefBlock,
               ArrayRef<MachineBasicBlock *> IncomingBlocks) {
    assert(Stack.empty());
    ReachableMap.clear();
    ReachableOrdered.clear();
    Predecessors.clear();

    // Insert the def block first, so that it acts as an end point for the
    // traversal.
    ReachableMap.try_emplace(&DefBlock, false);
    ReachableOrdered.push_back(&DefBlock);

    for (MachineBasicBlock *MBB : IncomingBlocks) {
      if (MBB == &DefBlock) {
        ReachableMap[&DefBlock] = true; // self-loop on DefBlock
        continue;
      }

      ReachableMap.try_emplace(MBB, false);
      ReachableOrdered.push_back(MBB);

      // If this block has a divergent terminator and the def block is its
      // post-dominator, the wave may first visit the other successors.
      bool Divergent = false;
      for (MachineInstr &MI : MBB->terminators()) {
        if (MI.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO ||
            MI.getOpcode() == AMDGPU::SI_IF ||
            MI.getOpcode() == AMDGPU::SI_ELSE ||
            MI.getOpcode() == AMDGPU::SI_LOOP) {
          Divergent = true;
          break;
        }
      }

      if (Divergent && PDT.dominates(&DefBlock, MBB)) {
        for (MachineBasicBlock *Succ : MBB->successors())
          Stack.push_back(Succ);
      }
    }

    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!ReachableMap.try_emplace(MBB, false).second)
        continue;
      ReachableOrdered.push_back(MBB);

      for (MachineBasicBlock *Succ : MBB->successors())
        Stack.push_back(Succ);
    }
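
    // A reachable block with no predecessor inside the reachable set is a
    // "source": the incoming value arriving from it can be used as-is. For
    // blocks that do have reachable predecessors, record their predecessors
    // from outside the set so the caller can seed them (with undef) for the
    // SSAUpdater.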
    for (MachineBasicBlock *MBB : ReachableOrdered) {
      bool HaveReachablePred = false;
      for (MachineBasicBlock *Pred : MBB->predecessors()) {
        if (ReachableMap.count(Pred)) {
          HaveReachablePred = true;
        } else {
          Stack.push_back(Pred);
        }
      }
      if (!HaveReachablePred)
        ReachableMap[MBB] = true;
      if (HaveReachablePred) {
        for (MachineBasicBlock *UnreachablePred : Stack) {
          if (llvm::find(Predecessors, UnreachablePred) == Predecessors.end())
            Predecessors.push_back(UnreachablePred);
        }
      }

      Stack.clear();
    }
  }
};

/// Helper class that detects loops which require us to lower an i1 COPY into
/// bitwise manipulation.
///
/// Unfortunately, we cannot use LoopInfo because LoopInfo does not distinguish
/// between loops with the same header. Consider this example:
///
///  A-+-+
///  | | |
///  B-+ |
///  |   |
///  C---+
///
/// A is the header of a loop containing A, B, and C as far as LoopInfo is
/// concerned. However, an i1 COPY in B that is used in C must be lowered to
/// bitwise operations to combine results from different loop iterations when
/// B has a divergent branch (since by default we will compile this code such
/// that threads in a wave are merged at the entry of C).
///
/// The following rule is implemented to determine whether bitwise operations
/// are required: use the bitwise lowering for a def in block B if a backward
/// edge to B is reachable without going through the nearest common
/// post-dominator of B and all uses of the def.
///
/// TODO: This rule is conservative because it does not check whether the
/// relevant branches are actually divergent.
///
/// The class is designed to cache the CFG traversal so that it can be re-used
/// for multiple defs within the same basic block.
///
/// TODO: We could use region analysis to quickly skip over SESE regions during
///       the traversal.
class LoopFinder {
  MachineDominatorTree &DT;
  MachinePostDominatorTree &PDT;

  // All visited / reachable blocks, tagged by level (level 0 is the def block,
  // level 1 are all blocks reachable including but not going through the def
  // block's IPDOM, etc.).
  DenseMap<MachineBasicBlock *, unsigned> Visited;

  // Nearest common dominator of all visited blocks by level (level 0 is the
  // def block). Used for seeding the SSAUpdater.
  SmallVector<MachineBasicBlock *, 4> CommonDominators;

  // Post-dominator of all visited blocks.
  MachineBasicBlock *VisitedPostDom = nullptr;

  // Level at which a loop was found: 0 is not possible; 1 = a backward edge is
  // reachable without going through the IPDOM of the def block (if the IPDOM
  // itself has an edge to the def block, the loop level is 2), etc.
  unsigned FoundLoopLevel = ~0u;

  MachineBasicBlock *DefBlock = nullptr;
  SmallVector<MachineBasicBlock *, 4> Stack;
  SmallVector<MachineBasicBlock *, 4> NextLevel;

public:
  LoopFinder(MachineDominatorTree &DT, MachinePostDominatorTree &PDT)
      : DT(DT), PDT(PDT) {}

  void initialize(MachineBasicBlock &MBB) {
    Visited.clear();
    CommonDominators.clear();
    Stack.clear();
    NextLevel.clear();
    VisitedPostDom = nullptr;
    FoundLoopLevel = ~0u;

    DefBlock = &MBB;
  }

  /// Check whether a backward edge can be reached without going through the
  /// given \p PostDom of the def block.
  ///
  /// Return the level of \p PostDom if a loop was found, or 0 otherwise.
  unsigned findLoop(MachineBasicBlock *PostDom) {
    MachineDomTreeNode *PDNode = PDT.getNode(DefBlock);

    if (!VisitedPostDom)
      advanceLevel();

    unsigned Level = 0;
    while (PDNode->getBlock() != PostDom) {
      if (PDNode->getBlock() == VisitedPostDom)
        advanceLevel();
      PDNode = PDNode->getIDom();
      Level++;
      if (FoundLoopLevel == Level)
        return Level;
    }

    return 0;
  }

  /// Add undef values dominating the loop and the optionally given additional
  /// blocks, so that the SSA updater doesn't have to search all the way to the
  /// function entry.
  void addLoopEntries(unsigned LoopLevel, MachineSSAUpdater &SSAUpdater,
                      ArrayRef<MachineBasicBlock *> Blocks = {}) {
    assert(LoopLevel < CommonDominators.size());

    MachineBasicBlock *Dom = CommonDominators[LoopLevel];
    for (MachineBasicBlock *MBB : Blocks)
      Dom = DT.findNearestCommonDominator(Dom, MBB);

    if (!inLoopLevel(*Dom, LoopLevel, Blocks)) {
      SSAUpdater.AddAvailableValue(Dom, insertUndefLaneMask(*Dom));
    } else {
      // The dominator is part of the loop or the given blocks, so add the
      // undef value to unreachable predecessors instead.
      for (MachineBasicBlock *Pred : Dom->predecessors()) {
        if (!inLoopLevel(*Pred, LoopLevel, Blocks))
          SSAUpdater.AddAvailableValue(Pred, insertUndefLaneMask(*Pred));
      }
    }
  }

private:
  bool inLoopLevel(MachineBasicBlock &MBB, unsigned LoopLevel,
                   ArrayRef<MachineBasicBlock *> Blocks) const {
    auto DomIt = Visited.find(&MBB);
    if (DomIt != Visited.end() && DomIt->second <= LoopLevel)
      return true;

    if (llvm::find(Blocks, &MBB) != Blocks.end())
      return true;

    return false;
  }

  void advanceLevel() {
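    // Advance the traversal by one level: move VisitedPostDom up to its
    // immediate post-dominator and visit every block reachable at this level,
    // recording backward edges into the def block and the nearest common
    // dominator of all blocks visited so far.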
    MachineBasicBlock *VisitedDom;

    if (!VisitedPostDom) {
      VisitedPostDom = DefBlock;
      VisitedDom = DefBlock;
      Stack.push_back(DefBlock);
    } else {
      VisitedPostDom = PDT.getNode(VisitedPostDom)->getIDom()->getBlock();
      VisitedDom = CommonDominators.back();

      for (unsigned i = 0; i < NextLevel.size();) {
        if (PDT.dominates(VisitedPostDom, NextLevel[i])) {
          Stack.push_back(NextLevel[i]);

          NextLevel[i] = NextLevel.back();
          NextLevel.pop_back();
        } else {
          i++;
        }
      }
    }

    unsigned Level = CommonDominators.size();
    while (!Stack.empty()) {
      MachineBasicBlock *MBB = Stack.pop_back_val();
      if (!PDT.dominates(VisitedPostDom, MBB))
        NextLevel.push_back(MBB);

      Visited[MBB] = Level;
      VisitedDom = DT.findNearestCommonDominator(VisitedDom, MBB);

      for (MachineBasicBlock *Succ : MBB->successors()) {
        if (Succ == DefBlock) {
          if (MBB == VisitedPostDom)
            FoundLoopLevel = std::min(FoundLoopLevel, Level + 1);
          else
            FoundLoopLevel = std::min(FoundLoopLevel, Level);
          continue;
        }

        if (Visited.try_emplace(Succ, ~0u).second) {
          if (MBB == VisitedPostDom)
            NextLevel.push_back(Succ);
          else
            Stack.push_back(Succ);
        }
      }
    }

    CommonDominators.push_back(VisitedDom);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE, "SI Lower i1 Copies", false,
                    false)

char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;

FunctionPass *llvm::createSILowerI1CopiesPass() {
  return new SILowerI1Copies();
}

static unsigned createLaneMaskReg(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  return MRI.createVirtualRegister(ST.isWave32() ? &AMDGPU::SReg_32RegClass
                                                 : &AMDGPU::SReg_64RegClass);
}

static unsigned insertUndefLaneMask(MachineBasicBlock &MBB) {
  MachineFunction &MF = *MBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  unsigned UndefReg = createLaneMaskReg(MF);
  BuildMI(MBB, MBB.getFirstTerminator(), {}, TII->get(AMDGPU::IMPLICIT_DEF),
          UndefReg);
  return UndefReg;
}

/// Lower all instructions that def or use vreg_1 registers.
///
/// In a first pass, we lower COPYs from vreg_1 to vector registers, as can
/// occur around inline assembly. We do this first, before vreg_1 registers
/// are changed to scalar mask registers.
///
/// Then we lower all defs of vreg_1 registers. Phi nodes are lowered before
/// all others, because phi lowering looks through copies and can therefore
/// often make copy lowering unnecessary.
bool SILowerI1Copies::runOnMachineFunction(MachineFunction &TheMF) {
  MF = &TheMF;
  MRI = &MF->getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  PDT = &getAnalysis<MachinePostDominatorTree>();

  ST = &MF->getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  IsWave32 = ST->isWave32();

  if (IsWave32) {
    ExecReg = AMDGPU::EXEC_LO;
    MovOp = AMDGPU::S_MOV_B32;
    AndOp = AMDGPU::S_AND_B32;
    OrOp = AMDGPU::S_OR_B32;
    XorOp = AMDGPU::S_XOR_B32;
    AndN2Op = AMDGPU::S_ANDN2_B32;
    OrN2Op = AMDGPU::S_ORN2_B32;
  } else {
    ExecReg = AMDGPU::EXEC;
    MovOp = AMDGPU::S_MOV_B64;
    AndOp = AMDGPU::S_AND_B64;
    OrOp = AMDGPU::S_OR_B64;
    XorOp = AMDGPU::S_XOR_B64;
    AndN2Op = AMDGPU::S_ANDN2_B64;
    OrN2Op = AMDGPU::S_ORN2_B64;
  }

  lowerCopiesFromI1();
  lowerPhis();
  lowerCopiesToI1();

  for (unsigned Reg : ConstrainRegs)
    MRI->constrainRegClass(Reg, &AMDGPU::SReg_1_XEXECRegClass);
  ConstrainRegs.clear();

  return true;
}

static bool isVRegCompatibleReg(const SIRegisterInfo &TRI,
                                const MachineRegisterInfo &MRI,
                                Register Reg) {
  unsigned Size = TRI.getRegSizeInBits(Reg, MRI);
  return Size == 1 || Size == 32;
}

void SILowerI1Copies::lowerCopiesFromI1() {
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      Register SrcReg = MI.getOperand(1).getReg();
      if (!isVreg1(SrcReg))
        continue;

      if (isLaneMaskReg(DstReg) || isVreg1(DstReg))
        continue;

      // Copy into a 32-bit vector register.
      LLVM_DEBUG(dbgs() << "Lower copy from i1: " << MI);
      DebugLoc DL = MI.getDebugLoc();

      assert(isVRegCompatibleReg(TII->getRegisterInfo(), *MRI, DstReg));
      assert(!MI.getOperand(0).getSubReg());

      ConstrainRegs.insert(SrcReg);
      BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addImm(0)
          .addImm(0)
          .addImm(0)
          .addImm(-1)
          .addReg(SrcReg);
      DeadCopies.push_back(&MI);
    }
  }
  for (MachineInstr *MI : DeadCopies)
    MI->eraseFromParent();
  DeadCopies.clear();
}

void SILowerI1Copies::lowerPhis() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  PhiIncomingAnalysis PIA(*PDT);
  SmallVector<MachineInstr *, 4> Vreg1Phis;
  SmallVector<MachineBasicBlock *, 4> IncomingBlocks;
  SmallVector<unsigned, 4> IncomingRegs;
  SmallVector<unsigned, 4> IncomingUpdated;
  DenseSet<unsigned> PhiRegisters;
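
  // Collect all vreg_1 phis up front; the lowering below inserts and erases
  // instructions as it goes, which would otherwise invalidate the iteration.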
  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB.phis()) {
      if (isVreg1(MI.getOperand(0).getReg()))
        Vreg1Phis.push_back(&MI);
    }
  }

  MachineBasicBlock *PrevMBB = nullptr;
  for (MachineInstr *MI : Vreg1Phis) {
    MachineBasicBlock &MBB = *MI->getParent();
    if (&MBB != PrevMBB) {
      LF.initialize(MBB);
      PrevMBB = &MBB;
    }

    LLVM_DEBUG(dbgs() << "Lower PHI: " << *MI);

    Register DstReg = MI->getOperand(0).getReg();
    MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
                                      : &AMDGPU::SReg_64RegClass);

    // Collect incoming values.
    for (unsigned i = 1; i < MI->getNumOperands(); i += 2) {
      assert(i + 1 < MI->getNumOperands());
      Register IncomingReg = MI->getOperand(i).getReg();
      MachineBasicBlock *IncomingMBB = MI->getOperand(i + 1).getMBB();
      MachineInstr *IncomingDef = MRI->getUniqueVRegDef(IncomingReg);

      if (IncomingDef->getOpcode() == AMDGPU::COPY) {
        IncomingReg = IncomingDef->getOperand(1).getReg();
        assert(isLaneMaskReg(IncomingReg) || isVreg1(IncomingReg));
        assert(!IncomingDef->getOperand(1).getSubReg());
      } else if (IncomingDef->getOpcode() == AMDGPU::IMPLICIT_DEF) {
        continue;
      } else {
        assert(IncomingDef->isPHI() || PhiRegisters.count(IncomingReg));
      }

      IncomingBlocks.push_back(IncomingMBB);
      IncomingRegs.push_back(IncomingReg);
    }

    PhiRegisters.insert(DstReg);

    // Phis in a loop that are observed outside the loop receive a simple but
    // conservatively correct treatment.
    std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
    for (MachineInstr &Use : MRI->use_instructions(DstReg))
      DomBlocks.push_back(Use.getParent());

    MachineBasicBlock *PostDomBound =
        PDT->findNearestCommonDominator(DomBlocks);
    unsigned FoundLoopLevel = LF.findLoop(PostDomBound);

    SSAUpdater.Initialize(DstReg);

    if (FoundLoopLevel) {
      LF.addLoopEntries(FoundLoopLevel, SSAUpdater, IncomingBlocks);

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        IncomingUpdated.push_back(createLaneMaskReg(*MF));
        SSAUpdater.AddAvailableValue(IncomingBlocks[i],
                                     IncomingUpdated.back());
      }

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
      }
    } else {
      // The phi is not observed from outside a loop. Use a more accurate
      // lowering.
      PIA.analyze(MBB, IncomingBlocks);

      for (MachineBasicBlock *MBB : PIA.predecessors())
        SSAUpdater.AddAvailableValue(MBB, insertUndefLaneMask(*MBB));

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        if (PIA.isSource(IMBB)) {
          IncomingUpdated.push_back(0);
          SSAUpdater.AddAvailableValue(&IMBB, IncomingRegs[i]);
        } else {
          IncomingUpdated.push_back(createLaneMaskReg(*MF));
          SSAUpdater.AddAvailableValue(&IMBB, IncomingUpdated.back());
        }
      }

      for (unsigned i = 0; i < IncomingRegs.size(); ++i) {
        if (!IncomingUpdated[i])
          continue;

        MachineBasicBlock &IMBB = *IncomingBlocks[i];
        buildMergeLaneMasks(
            IMBB, getSaluInsertionAtEnd(IMBB), {}, IncomingUpdated[i],
            SSAUpdater.GetValueInMiddleOfBlock(&IMBB), IncomingRegs[i]);
      }
    }

    unsigned NewReg = SSAUpdater.GetValueInMiddleOfBlock(&MBB);
    if (NewReg != DstReg) {
      MRI->replaceRegWith(NewReg, DstReg);
      MI->eraseFromParent();
    }

    IncomingBlocks.clear();
    IncomingRegs.clear();
    IncomingUpdated.clear();
  }
}

void SILowerI1Copies::lowerCopiesToI1() {
  MachineSSAUpdater SSAUpdater(*MF);
  LoopFinder LF(*DT, *PDT);
  SmallVector<MachineInstr *, 4> DeadCopies;

  for (MachineBasicBlock &MBB : *MF) {
    LF.initialize(MBB);

    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != AMDGPU::IMPLICIT_DEF &&
          MI.getOpcode() != AMDGPU::COPY)
        continue;

      Register DstReg = MI.getOperand(0).getReg();
      if (!isVreg1(DstReg))
        continue;

      if (MRI->use_empty(DstReg)) {
        DeadCopies.push_back(&MI);
        continue;
      }

      LLVM_DEBUG(dbgs() << "Lower Other: " << MI);

      MRI->setRegClass(DstReg, IsWave32 ? &AMDGPU::SReg_32RegClass
                                        : &AMDGPU::SReg_64RegClass);
      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF)
        continue;

      DebugLoc DL = MI.getDebugLoc();
      Register SrcReg = MI.getOperand(1).getReg();
      assert(!MI.getOperand(1).getSubReg());

      if (!Register::isVirtualRegister(SrcReg) ||
          (!isLaneMaskReg(SrcReg) && !isVreg1(SrcReg))) {
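        // The source is a plain 32-bit value rather than a lane mask;
        // materialize a lane mask from it by comparing against zero per lane.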
        assert(TII->getRegisterInfo().getRegSizeInBits(SrcReg, *MRI) == 32);
        unsigned TmpReg = createLaneMaskReg(*MF);
        BuildMI(MBB, MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64), TmpReg)
            .addImm(0)
            .addReg(SrcReg);
        MI.getOperand(1).setReg(TmpReg);
        SrcReg = TmpReg;
      }

      // Defs in a loop that are observed outside the loop must be transformed
      // into appropriate bit manipulation.
      std::vector<MachineBasicBlock *> DomBlocks = {&MBB};
      for (MachineInstr &Use : MRI->use_instructions(DstReg))
        DomBlocks.push_back(Use.getParent());

      MachineBasicBlock *PostDomBound =
          PDT->findNearestCommonDominator(DomBlocks);
      unsigned FoundLoopLevel = LF.findLoop(PostDomBound);
      if (FoundLoopLevel) {
        SSAUpdater.Initialize(DstReg);
        SSAUpdater.AddAvailableValue(&MBB, DstReg);
        LF.addLoopEntries(FoundLoopLevel, SSAUpdater);

        buildMergeLaneMasks(MBB, MI, DL, DstReg,
                            SSAUpdater.GetValueInMiddleOfBlock(&MBB), SrcReg);
        DeadCopies.push_back(&MI);
      }
    }
  }
  for (MachineInstr *MI : DeadCopies)
    MI->eraseFromParent();
  DeadCopies.clear();
}

bool SILowerI1Copies::isConstantLaneMask(unsigned Reg, bool &Val) const {
  const MachineInstr *MI;
  for (;;) {
    MI = MRI->getUniqueVRegDef(Reg);
    if (MI->getOpcode() != AMDGPU::COPY)
      break;

    Reg = MI->getOperand(1).getReg();
    if (!Register::isVirtualRegister(Reg))
      return false;
    if (!isLaneMaskReg(Reg))
      return false;
  }

  if (MI->getOpcode() != MovOp)
    return false;

  if (!MI->getOperand(1).isImm())
    return false;

  int64_t Imm = MI->getOperand(1).getImm();
  if (Imm == 0) {
    Val = false;
    return true;
  }
  if (Imm == -1) {
    Val = true;
    return true;
  }

  return false;
}

static void instrDefsUsesSCC(const MachineInstr &MI, bool &Def, bool &Use) {
  Def = false;
  Use = false;

  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isReg() && MO.getReg() == AMDGPU::SCC) {
      if (MO.isUse())
        Use = true;
      else
        Def = true;
    }
  }
}

/// Return a point at the end of the given \p MBB to insert SALU instructions
/// for lane mask calculation. Take terminators and SCC into account.
MachineBasicBlock::iterator
SILowerI1Copies::getSaluInsertionAtEnd(MachineBasicBlock &MBB) const {
  auto InsertionPt = MBB.getFirstTerminator();
  bool TerminatorsUseSCC = false;
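  // If no terminator reads SCC we can insert right before the terminators;
  // otherwise we have to back up to just before the instruction that defines
  // the SCC value those terminators consume.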
  for (auto I = InsertionPt, E = MBB.end(); I != E; ++I) {
    bool DefsSCC;
    instrDefsUsesSCC(*I, DefsSCC, TerminatorsUseSCC);
    if (TerminatorsUseSCC || DefsSCC)
      break;
  }

  if (!TerminatorsUseSCC)
    return InsertionPt;

  while (InsertionPt != MBB.begin()) {
    InsertionPt--;

    bool DefSCC, UseSCC;
    instrDefsUsesSCC(*InsertionPt, DefSCC, UseSCC);
    if (DefSCC)
      return InsertionPt;
  }

  // We should have at least seen an IMPLICIT_DEF or COPY
  llvm_unreachable("SCC used by terminator but no def in block");
}

void SILowerI1Copies::buildMergeLaneMasks(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          const DebugLoc &DL, unsigned DstReg,
                                          unsigned PrevReg, unsigned CurReg) {
  bool PrevVal = false;
  bool PrevConstant = isConstantLaneMask(PrevReg, PrevVal);
  bool CurVal = false;
  bool CurConstant = isConstantLaneMask(CurReg, CurVal);

  if (PrevConstant && CurConstant) {
    if (PrevVal == CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(CurReg);
    } else if (CurVal) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg).addReg(ExecReg);
    } else {
      BuildMI(MBB, I, DL, TII->get(XorOp), DstReg)
          .addReg(ExecReg)
          .addImm(-1);
    }
    return;
  }
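
  // General case: clear the lanes of the previous value that are active in
  // EXEC, keep only the active lanes of the current value, and OR the two
  // halves together, i.e. DstReg = (PrevReg & ~EXEC) | (CurReg & EXEC).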
  unsigned PrevMaskedReg = 0;
  unsigned CurMaskedReg = 0;
  if (!PrevConstant) {
    if (CurConstant && CurVal) {
      PrevMaskedReg = PrevReg;
    } else {
      PrevMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AndN2Op), PrevMaskedReg)
          .addReg(PrevReg)
          .addReg(ExecReg);
    }
  }
  if (!CurConstant) {
    // TODO: check whether CurReg is already masked by EXEC
    if (PrevConstant && PrevVal) {
      CurMaskedReg = CurReg;
    } else {
      CurMaskedReg = createLaneMaskReg(*MF);
      BuildMI(MBB, I, DL, TII->get(AndOp), CurMaskedReg)
          .addReg(CurReg)
          .addReg(ExecReg);
    }
  }

  if (PrevConstant && !PrevVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(CurMaskedReg);
  } else if (CurConstant && !CurVal) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), DstReg)
        .addReg(PrevMaskedReg);
  } else if (PrevConstant && PrevVal) {
    BuildMI(MBB, I, DL, TII->get(OrN2Op), DstReg)
        .addReg(CurMaskedReg)
        .addReg(ExecReg);
  } else {
    BuildMI(MBB, I, DL, TII->get(OrOp), DstReg)
        .addReg(PrevMaskedReg)
        .addReg(CurMaskedReg ? CurMaskedReg : ExecReg);
  }
}