1 //=- AArch64RedundantCopyElimination.cpp - Remove useless copy for AArch64 -=//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 // This pass removes unnecessary copies/moves in BBs based on a dominating
11 // We handle three cases:
12 // 1. For BBs that are targets of CBZ/CBNZ instructions, we know the value of
13 // the CBZ/CBNZ source register is zero on the taken/not-taken path. For
14 // instance, the copy instruction in the code below can be removed because
15 // the CBZW jumps to %bb.2 when w0 is zero.
20 // mov w0, wzr ; <-- redundant
22 // 2. If the flag setting instruction defines a register other than WZR/XZR, we
23 // can remove a zero copy in some cases.
30 // mov w0, wzr ; <-- redundant
34 // 3. Finally, if the flag setting instruction is a comparison against a
35 // constant (i.e., ADDS[W|X]ri, SUBS[W|X]ri), we can remove a mov immediate
42 // orr x0, xzr, #0x1 ; <-- redundant
44 // This pass should be run after register allocation.
46 // FIXME: This could also be extended to check the whole dominance subtree below
47 // the comparison if the compile time regression is acceptable.
49 // FIXME: Add support for handling CCMP instructions.
50 // FIXME: If the known register value is zero, we should be able to rewrite uses
51 // to use WZR/XZR directly in some cases.
52 //===----------------------------------------------------------------------===//
54 #include "llvm/ADT/Optional.h"
55 #include "llvm/ADT/SetVector.h"
56 #include "llvm/ADT/Statistic.h"
57 #include "llvm/ADT/iterator_range.h"
58 #include "llvm/CodeGen/MachineFunctionPass.h"
59 #include "llvm/CodeGen/MachineRegisterInfo.h"
60 #include "llvm/Support/Debug.h"
64 #define DEBUG_TYPE "aarch64-copyelim"
66 STATISTIC(NumCopiesRemoved, "Number of copies removed.");
69 class AArch64RedundantCopyElimination : public MachineFunctionPass {
// Cached per-function analyses; set in runOnMachineFunction.
70 const MachineRegisterInfo *MRI;
71 const TargetRegisterInfo *TRI;
73 // DomBBClobberedRegs is used when computing known values in the dominating
// basic block (the predecessor ending in the conditional branch).
75 BitVector DomBBClobberedRegs;
77 // OptBBClobberedRegs is used when optimizing away redundant copies/moves.
78 BitVector OptBBClobberedRegs;
82 AArch64RedundantCopyElimination() : MachineFunctionPass(ID) {
83 initializeAArch64RedundantCopyEliminationPass(
84 *PassRegistry::getPassRegistry());
// RegImm pairs a physical register with the constant value it is known to
// hold (see knownRegValInBlock below).
90 RegImm(MCPhysReg Reg, int32_t Imm) : Reg(Reg), Imm(Imm) {}
// Collect registers whose value is known at the start of \p MBB, implied by
// the dominating conditional branch \p CondBr; defined below.
93 bool knownRegValInBlock(MachineInstr &CondBr, MachineBasicBlock *MBB,
94 SmallVectorImpl<RegImm> &KnownRegs,
95 MachineBasicBlock::iterator &FirstUse);
// Remove copies/immediate moves in \p MBB that a dominating branch proves
// redundant; returns true if the block changed.
96 bool optimizeBlock(MachineBasicBlock *MBB);
97 bool runOnMachineFunction(MachineFunction &MF) override;
// The pass runs after register allocation, so require no virtual registers.
98 MachineFunctionProperties getRequiredProperties() const override {
99 return MachineFunctionProperties().set(
100 MachineFunctionProperties::Property::NoVRegs);
102 StringRef getPassName() const override {
103 return "AArch64 Redundant Copy Elimination";
// Pass identification; the address of ID is the unique pass identifier.
106 char AArch64RedundantCopyElimination::ID = 0;
// Register the pass under the "aarch64-copyelim" command-line name.
109 INITIALIZE_PASS(AArch64RedundantCopyElimination, "aarch64-copyelim",
110 "AArch64 redundant copy elimination pass", false, false)
112 /// Remember what registers the specified instruction modifies.
113 static void trackRegDefs(const MachineInstr &MI, BitVector &ClobberedRegs,
114 const TargetRegisterInfo *TRI) {
115 for (const MachineOperand &MO : MI.operands()) {
116 if (MO.isRegMask()) {
117 ClobberedRegs.setBitsNotInMask(MO.getRegMask());
123 unsigned Reg = MO.getReg();
129 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
130 ClobberedRegs.set(*AI);
134 /// It's possible to determine the value of a register based on a dominating
135 /// condition. To do so, this function checks to see if the basic block \p MBB
136 /// is the target of a conditional branch \p CondBr with an equality comparison.
137 /// If the branch is a CBZ/CBNZ, we know the value of its source operand is zero
138 /// in \p MBB for some cases. Otherwise, we find and inspect the NZCV setting
139 /// instruction (e.g., SUBS, ADDS). If this instruction defines a register
140 /// other than WZR/XZR, we know the value of the destination register is zero in
141 /// \p MBB for some cases. In addition, if the NZCV setting instruction is
142 /// comparing against a constant we know the other source register is equal to
143 /// the constant in \p MBB for some cases. If we find any constant values, push
144 /// a physical register and constant value pair onto the KnownRegs vector and
145 /// return true. Otherwise, return false if no known values were found.
146 bool AArch64RedundantCopyElimination::knownRegValInBlock(
147 MachineInstr &CondBr, MachineBasicBlock *MBB,
148 SmallVectorImpl<RegImm> &KnownRegs, MachineBasicBlock::iterator &FirstUse) {
149 unsigned Opc = CondBr.getOpcode();
151 // Check if the current basic block is the target block to which the
152 // CBZ/CBNZ instruction jumps when its Wt/Xt is zero.
153 if (((Opc == AArch64::CBZW || Opc == AArch64::CBZX) &&
154 MBB == CondBr.getOperand(1).getMBB()) ||
155 ((Opc == AArch64::CBNZW || Opc == AArch64::CBNZX) &&
156 MBB != CondBr.getOperand(1).getMBB())) {
// On this edge the tested register is known to be zero in MBB.
158 KnownRegs.push_back(RegImm(CondBr.getOperand(0).getReg(), 0));
162 // Otherwise, must be a conditional branch.
163 if (Opc != AArch64::Bcc)
166 // Must be an equality check (i.e., == or !=).
167 AArch64CC::CondCode CC = (AArch64CC::CondCode)CondBr.getOperand(0).getImm();
168 if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
// Equality must hold on the edge into MBB: an EQ branch must target MBB,
// and a NE branch must fall through into MBB.
171 MachineBasicBlock *BrTarget = CondBr.getOperand(1).getMBB();
172 if ((CC == AArch64CC::EQ && BrTarget != MBB) ||
173 (CC == AArch64CC::NE && BrTarget == MBB))
176 // Stop if we get to the beginning of PredMBB.
177 MachineBasicBlock *PredMBB = *MBB->pred_begin();
178 assert(PredMBB == CondBr.getParent() &&
179 "Conditional branch not in predecessor block!");
180 if (CondBr == PredMBB->begin())
183 // Registers clobbered in PredMBB between CondBr instruction and current
184 // instruction being checked in loop.
185 DomBBClobberedRegs.reset();
187 // Find compare instruction that sets NZCV used by CondBr.
188 MachineBasicBlock::reverse_iterator RIt = CondBr.getReverseIterator();
189 for (MachineInstr &PredI : make_range(std::next(RIt), PredMBB->rend())) {
192 switch (PredI.getOpcode()) {
196 // CMN is an alias for ADDS with a dead destination register.
197 case AArch64::ADDSWri:
198 case AArch64::ADDSXri:
201 // CMP is an alias for SUBS with a dead destination register.
202 case AArch64::SUBSWri:
203 case AArch64::SUBSXri: {
204 // Sometimes the first operand is a FrameIndex. Bail if that happens.
205 if (!PredI.getOperand(1).isReg())
207 MCPhysReg DstReg = PredI.getOperand(0).getReg();
208 MCPhysReg SrcReg = PredI.getOperand(1).getReg();
211 // If we're comparing against a non-symbolic immediate and the source
212 // register of the compare is not modified (including a self-clobbering
213 // compare) between the compare and conditional branch we know the value
214 // of the 1st source operand.
215 if (PredI.getOperand(2).isImm() && !DomBBClobberedRegs[SrcReg] &&
217 // We've found the instruction that sets NZCV.
218 int32_t KnownImm = PredI.getOperand(2).getImm();
219 int32_t Shift = PredI.getOperand(3).getImm();
// NOTE(review): the negation presumably applies on the ADDS (CMN) path,
// where the register equals the negated immediate — the guarding
// condition is not visible in this chunk; confirm against upstream.
222 KnownImm = -KnownImm;
224 KnownRegs.push_back(RegImm(SrcReg, KnownImm));
228 // If this instruction defines something other than WZR/XZR, we know its
229 // result is zero in some cases.
230 if (DstReg == AArch64::WZR || DstReg == AArch64::XZR)
233 // The destination register must not be modified between the NZCV setting
234 // instruction and the conditional branch.
235 if (DomBBClobberedRegs[DstReg])
239 KnownRegs.push_back(RegImm(DstReg, 0));
243 // Look for NZCV setting instructions that define something other than
// WZR/XZR; on the equality edge their destination is known to be zero.
245 case AArch64::ADCSWr:
246 case AArch64::ADCSXr:
247 case AArch64::ADDSWrr:
248 case AArch64::ADDSWrs:
249 case AArch64::ADDSWrx:
250 case AArch64::ADDSXrr:
251 case AArch64::ADDSXrs:
252 case AArch64::ADDSXrx:
253 case AArch64::ADDSXrx64:
254 case AArch64::ANDSWri:
255 case AArch64::ANDSWrr:
256 case AArch64::ANDSWrs:
257 case AArch64::ANDSXri:
258 case AArch64::ANDSXrr:
259 case AArch64::ANDSXrs:
260 case AArch64::BICSWrr:
261 case AArch64::BICSWrs:
262 case AArch64::BICSXrs:
263 case AArch64::BICSXrr:
264 case AArch64::SBCSWr:
265 case AArch64::SBCSXr:
266 case AArch64::SUBSWrr:
267 case AArch64::SUBSWrs:
268 case AArch64::SUBSWrx:
269 case AArch64::SUBSXrr:
270 case AArch64::SUBSXrs:
271 case AArch64::SUBSXrx:
272 case AArch64::SUBSXrx64: {
273 MCPhysReg DstReg = PredI.getOperand(0).getReg();
274 if (DstReg == AArch64::WZR || DstReg == AArch64::XZR)
277 // The destination register of the NZCV setting instruction must not be
278 // modified before the conditional branch.
279 if (DomBBClobberedRegs[DstReg])
282 // We've found the instruction that sets NZCV whose DstReg == 0.
284 KnownRegs.push_back(RegImm(DstReg, 0));
289 // Bail if we see an instruction that defines NZCV that we don't handle.
290 if (PredI.definesRegister(AArch64::NZCV))
293 // Track clobbered registers.
294 trackRegDefs(PredI, DomBBClobberedRegs, TRI);
299 bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
300 // Check if the current basic block has a single predecessor.
301 if (MBB->pred_size() != 1)
304 // Check if the predecessor has two successors, implying the block ends in a
305 // conditional branch.
306 MachineBasicBlock *PredMBB = *MBB->pred_begin();
307 if (PredMBB->succ_size() != 2)
310 MachineBasicBlock::iterator CondBr = PredMBB->getLastNonDebugInstr();
311 if (CondBr == PredMBB->end())
314 // Keep track of the earliest point in the PredMBB block where kill markers
315 // need to be removed if a COPY is removed.
316 MachineBasicBlock::iterator FirstUse;
317 // After calling knownRegValInBlock, FirstUse will either point to a CBZ/CBNZ
318 // or a compare (i.e., SUBS). In the latter case, we must take care when
319 // updating FirstUse when scanning for COPY instructions. In particular, if
320 // there's a COPY in between the compare and branch the COPY should not
// update FirstUse.
322 bool SeenFirstUse = false;
323 // Registers that contain a known value at the start of MBB.
324 SmallVector<RegImm, 4> KnownRegs;
326 MachineBasicBlock::iterator Itr = std::next(CondBr);
330 if (!knownRegValInBlock(*Itr, MBB, KnownRegs, FirstUse))
333 // Reset the clobber list.
334 OptBBClobberedRegs.reset();
336 // Look backward in PredMBB for COPYs from the known reg to find other
337 // registers that are known to be a constant value.
338 for (auto PredI = Itr;; --PredI) {
339 if (FirstUse == PredI)
342 if (PredI->isCopy()) {
343 MCPhysReg CopyDstReg = PredI->getOperand(0).getReg();
344 MCPhysReg CopySrcReg = PredI->getOperand(1).getReg();
345 for (auto &KnownReg : KnownRegs) {
346 if (OptBBClobberedRegs[KnownReg.Reg])
348 // If we have X = COPY Y, and Y is known to be zero, then now X is
// known to hold the same constant value.
350 if (CopySrcReg == KnownReg.Reg && !OptBBClobberedRegs[CopyDstReg]) {
351 KnownRegs.push_back(RegImm(CopyDstReg, KnownReg.Imm));
356 // If we have X = COPY Y, and X is known to be zero, then now Y is
// known to hold the same constant value.
358 if (CopyDstReg == KnownReg.Reg && !OptBBClobberedRegs[CopySrcReg]) {
359 KnownRegs.push_back(RegImm(CopySrcReg, KnownReg.Imm));
367 // Stop if we get to the beginning of PredMBB.
368 if (PredI == PredMBB->begin())
371 trackRegDefs(*PredI, OptBBClobberedRegs, TRI);
372 // Stop if all of the known-zero regs have been clobbered.
373 if (all_of(KnownRegs, [&](RegImm KnownReg) {
374 return OptBBClobberedRegs[KnownReg.Reg];
380 } while (Itr != PredMBB->begin() && Itr->isTerminator());
382 // We've not found a register with a known value, time to bail out.
383 if (KnownRegs.empty())
386 bool Changed = false;
387 // UsedKnownRegs is the set of KnownRegs that have had uses added to MBB.
388 SmallSetVector<unsigned, 4> UsedKnownRegs;
389 MachineBasicBlock::iterator LastChange = MBB->begin();
390 // Remove redundant copy/move instructions unless KnownReg is modified.
391 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
392 MachineInstr *MI = &*I;
394 bool RemovedMI = false;
395 bool IsCopy = MI->isCopy();
396 bool IsMoveImm = MI->isMoveImmediate();
397 if (IsCopy || IsMoveImm) {
398 MCPhysReg DefReg = MI->getOperand(0).getReg();
399 MCPhysReg SrcReg = IsCopy ? MI->getOperand(1).getReg() : 0;
400 int64_t SrcImm = IsMoveImm ? MI->getOperand(1).getImm() : 0;
// Only consider zero copies (from WZR/XZR) and immediate moves into
// non-reserved registers.
401 if (!MRI->isReserved(DefReg) &&
402 ((IsCopy && (SrcReg == AArch64::XZR || SrcReg == AArch64::WZR)) ||
404 for (RegImm &KnownReg : KnownRegs) {
405 if (KnownReg.Reg != DefReg &&
406 !TRI->isSuperRegister(DefReg, KnownReg.Reg))
409 // For a copy, the known value must be a zero.
410 if (IsCopy && KnownReg.Imm != 0)
414 // For a move immediate, the known immediate must match the source
// immediate.
416 if (KnownReg.Imm != SrcImm)
419 // Don't remove a move immediate that implicitly defines the upper
420 // bits when only the lower 32 bits are known.
421 MCPhysReg CmpReg = KnownReg.Reg;
422 if (any_of(MI->implicit_operands(), [CmpReg](MachineOperand &O) {
423 return !O.isDead() && O.isReg() && O.isDef() &&
424 O.getReg() != CmpReg;
430 DEBUG(dbgs() << "Remove redundant Copy : " << *MI);
432 DEBUG(dbgs() << "Remove redundant Move : " << *MI);
434 MI->eraseFromParent();
438 UsedKnownRegs.insert(KnownReg.Reg);
445 // Skip to the next instruction if we removed the COPY/MovImm.
449 // Remove any regs the MI clobbers from the KnownConstRegs set.
450 for (unsigned RI = 0; RI < KnownRegs.size();)
451 if (MI->modifiesRegister(KnownRegs[RI].Reg, TRI)) {
// Swap-and-pop: drop the clobbered entry without shifting the vector.
452 std::swap(KnownRegs[RI], KnownRegs[KnownRegs.size() - 1]);
453 KnownRegs.pop_back();
454 // Don't increment RI since we need to now check the swapped-in
// element.
460 // Continue until the KnownRegs set is empty.
461 if (KnownRegs.empty())
468 // Add newly used regs to the block's live-in list if they aren't there
// already.
470 for (MCPhysReg KnownReg : UsedKnownRegs)
471 if (!MBB->isLiveIn(KnownReg))
472 MBB->addLiveIn(KnownReg);
474 // Clear kills in the range where changes were made. This is conservative,
475 // but should be okay since kill markers are being phased out.
476 DEBUG(dbgs() << "Clearing kill flags.\n\tFirstUse: " << *FirstUse
477 << "\tLastChange: " << *LastChange);
478 for (MachineInstr &MMI : make_range(FirstUse, PredMBB->end()))
480 for (MachineInstr &MMI : make_range(MBB->begin(), LastChange))
486 bool AArch64RedundantCopyElimination::runOnMachineFunction(
487 MachineFunction &MF) {
// Honor optnone / opt-bisect by skipping the function entirely.
488 if (skipFunction(MF.getFunction()))
490 TRI = MF.getSubtarget().getRegisterInfo();
491 MRI = &MF.getRegInfo();
493 // Resize the clobber register bitfield trackers. We do this once per
// function so optimizeBlock only has to reset, not reallocate, them.
495 DomBBClobberedRegs.resize(TRI->getNumRegs());
496 OptBBClobberedRegs.resize(TRI->getNumRegs());
498 bool Changed = false;
// Visit every block; Changed accumulates whether any copy was removed.
499 for (MachineBasicBlock &MBB : MF)
500 Changed |= optimizeBlock(&MBB);
504 FunctionPass *llvm::createAArch64RedundantCopyEliminationPass() {
505 return new AArch64RedundantCopyElimination();