1 //===--- HexagonBitSimplify.cpp -------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "hexbit"
12 #include "HexagonBitTracker.h"
13 #include "HexagonTargetMachine.h"
14 #include "llvm/ADT/BitVector.h"
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/StringRef.h"
19 #include "llvm/CodeGen/MachineBasicBlock.h"
20 #include "llvm/CodeGen/MachineDominators.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineFunctionPass.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineOperand.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/IR/DebugLoc.h"
29 #include "llvm/MC/MCInstrDesc.h"
30 #include "llvm/Pass.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Compiler.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Target/TargetRegisterInfo.h"
// Command-line knob: when set (default), subregister uses on tied operands
// are preserved, which blocks the register/subregister replacements below
// (see hasTiedUse).
47 static cl::opt<bool> PreserveTiedOps("hexbit-keep-tied", cl::Hidden,
48 cl::init(true), cl::desc("Preserve subregisters in tied operands"));
// Forward declarations of the pass registration hooks; the enclosing
// "namespace llvm {" opener is not visible in this excerpt.
52 void initializeHexagonBitSimplifyPass(PassRegistry& Registry);
53 FunctionPass *createHexagonBitSimplify();
55 } // end namespace llvm
59 // Set of virtual registers, based on BitVector.
// Each virtual register maps to a bit index via v2x (virtReg2Index) and back
// via x2v (index2VirtReg); all set operations work on those indices.
// NOTE(review): this excerpt is elided — several method bodies are missing
// their tail lines (e.g. the not-found returns of find_first/find_next).
60 struct RegisterSet : private BitVector {
61 RegisterSet() = default;
62 explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
63 RegisterSet(const RegisterSet &RS) = default;
65 using BitVector::clear;
66 using BitVector::count;
// First register in the set (presumably 0 when empty — tail elided here).
68 unsigned find_first() const {
69 int First = BitVector::find_first();
// Next register after Prev, iterating in index order.
75 unsigned find_next(unsigned Prev) const {
76 int Next = BitVector::find_next(v2x(Prev));
// Insert/remove a single virtual register R.
82 RegisterSet &insert(unsigned R) {
83 unsigned Idx = v2x(R);
85 return static_cast<RegisterSet&>(BitVector::set(Idx));
87 RegisterSet &remove(unsigned R) {
88 unsigned Idx = v2x(R);
91 return static_cast<RegisterSet&>(BitVector::reset(Idx));
// Set union / difference with another RegisterSet.
94 RegisterSet &insert(const RegisterSet &Rs) {
95 return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
97 RegisterSet &remove(const RegisterSet &Rs) {
98 return static_cast<RegisterSet&>(BitVector::reset(Rs));
// Mutable/const bit access keyed by register (not raw index).
101 reference operator[](unsigned R) {
102 unsigned Idx = v2x(R);
104 return BitVector::operator[](Idx);
106 bool operator[](unsigned R) const {
107 unsigned Idx = v2x(R);
108 assert(Idx < size());
109 return BitVector::operator[](Idx);
111 bool has(unsigned R) const {
112 unsigned Idx = v2x(R);
115 return BitVector::test(Idx);
119 return !BitVector::any();
121 bool includes(const RegisterSet &Rs) const {
122 // A.BitVector::test(B) <=> A-B != {}
123 return !Rs.BitVector::test(*this);
125 bool intersects(const RegisterSet &Rs) const {
126 return BitVector::anyCommon(Rs);
// Grow the underlying vector so index Idx is addressable (min size 32).
130 void ensure(unsigned Idx) {
132 resize(std::max(Idx+1, 32U));
// Virtual register <-> dense bit-index conversions.
135 static inline unsigned v2x(unsigned v) {
136 return TargetRegisterInfo::virtReg2Index(v);
139 static inline unsigned x2v(unsigned x) {
140 return TargetRegisterInfo::index2VirtReg(x);
// Debug-printing helper: wraps a RegisterSet plus TRI so a whole set can be
// streamed to a raw_ostream. The enclosing struct header is not visible in
// this excerpt.
145 PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
148 friend raw_ostream &operator<< (raw_ostream &OS,
149 const PrintRegSet &P);
152 const RegisterSet &RS;
153 const TargetRegisterInfo *TRI;
// Declared LLVM_ATTRIBUTE_UNUSED so debug-only builds don't warn when the
// printer is never referenced.
156 raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
157 LLVM_ATTRIBUTE_UNUSED;
158 raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
// Loop condition relies on find_first/find_next returning 0 when exhausted
// (their bodies are elided above — TODO confirm the sentinel).
160 for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
161 OS << ' ' << PrintReg(R, P.TRI);
166 class Transformation;
// The pass itself: walks the dominator tree applying bit-level
// simplifications. Also hosts a library of static helpers (bit-cell
// comparison, register replacement, subregister math) shared by the
// transformation classes later in the file.
168 class HexagonBitSimplify : public MachineFunctionPass {
172 HexagonBitSimplify() : MachineFunctionPass(ID), MDT(nullptr) {
173 initializeHexagonBitSimplifyPass(*PassRegistry::getPassRegistry())
176 StringRef getPassName() const override {
177 return "Hexagon bit simplification";
// Requires and preserves the machine dominator tree (used by visitBlock
// and DeadCodeElimination below).
180 void getAnalysisUsage(AnalysisUsage &AU) const override {
181 AU.addRequired<MachineDominatorTree>();
182 AU.addPreserved<MachineDominatorTree>();
183 MachineFunctionPass::getAnalysisUsage(AU);
186 bool runOnMachineFunction(MachineFunction &MF) override;
// Collect virtual-register defs/uses of MI into a RegisterSet.
188 static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
189 static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
// Bit-cell queries over BitTracker lattice values.
190 static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
191 const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
192 static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
194 static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
195 uint16_t W, uint64_t &U);
// Rewrite all uses of OldR (optionally through subregisters) to NewR.
196 static bool replaceReg(unsigned OldR, unsigned NewR,
197 MachineRegisterInfo &MRI);
198 static bool getSubregMask(const BitTracker::RegisterRef &RR,
199 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
200 static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
201 unsigned NewSR, MachineRegisterInfo &MRI);
202 static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
203 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
204 static bool parseRegSequence(const MachineInstr &I,
205 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
206 const MachineRegisterInfo &MRI);
// Used-bit computation for stores and for fixed-operand instructions.
208 static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
210 static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
211 uint16_t Begin, const HexagonInstrInfo &HII);
213 static const TargetRegisterClass *getFinalVRegClass(
214 const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
215 static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
216 const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);
219 MachineDominatorTree *MDT;
221 bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
222 static bool hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
223 unsigned NewSub = Hexagon::NoSubRegister);
226 char HexagonBitSimplify::ID = 0;
227 typedef HexagonBitSimplify HBS;
229 // The purpose of this class is to provide a common facility to traverse
230 // the function top-down or bottom-up via the dominator tree, and keep
231 // track of the available registers.
// Abstract base for the concrete transformations (e.g.
// RedundantInstrElimination); TopDown selects whether processBlock runs
// before or after the dominated children are visited (see visitBlock).
232 class Transformation {
236 Transformation(bool TD) : TopDown(TD) {}
237 virtual ~Transformation() = default;
239 virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
242 } // end anonymous namespace
// Standard LLVM pass registration, declaring the dominator-tree dependency.
244 INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexbit",
245 "Hexagon bit simplification", false, false)
246 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
247 INITIALIZE_PASS_END(HexagonBitSimplify, "hexbit",
248 "Hexagon bit simplification", false, false)
// Recursive dominator-tree walk driving a Transformation. Runs processBlock
// on B (before children when top-down — the branch itself is elided in this
// excerpt), extends the available-register set AVs with B's defs, then
// recurses into the dominated blocks. Returns whether anything changed.
250 bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
252 MachineDomTreeNode *N = MDT->getNode(&B);
253 typedef GraphTraits<MachineDomTreeNode*> GTN;
254 bool Changed = false;
257 Changed = T.processBlock(B, AVs);
261 getInstrDefs(I, Defs);
// Children see AVs plus everything defined in this block.
262 RegisterSet NewAVs = AVs;
265 for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) {
266 MachineBasicBlock *SB = (*I)->getBlock();
267 Changed |= visitBlock(*SB, T, NewAVs);
// Bottom-up variant: process the block after its children.
270 Changed |= T.processBlock(B, AVs);
276 // Utility functions:
// Collect all virtual registers defined by MI into Defs; physical registers
// are skipped. (The insert call itself is elided from this excerpt.)
278 void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
280 for (auto &Op : MI.operands()) {
281 if (!Op.isReg() || !Op.isDef())
283 unsigned R = Op.getReg();
284 if (!TargetRegisterInfo::isVirtualRegister(R))
// Collect all virtual registers used by MI into Uses; mirror image of
// getInstrDefs (the insert call is elided from this excerpt).
290 void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
292 for (auto &Op : MI.operands()) {
293 if (!Op.isReg() || !Op.isUse())
295 unsigned R = Op.getReg();
296 if (!TargetRegisterInfo::isVirtualRegister(R))
302 // Check if all the bits in range [B, E) in both cells are equal.
// Conservative: a "bottom" lattice value (self-reference with Reg == 0) can
// never be proven equal to anything, so it fails the comparison.
303 bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1,
304 uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2,
306 for (uint16_t i = 0; i < W; ++i) {
307 // If RC1[i] is "bottom", it cannot be proven equal to RC2[i].
308 if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0)
// Same for RC2[i] — and finally the bits must compare equal.
311 if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0)
313 if (RC1[B1+i] != RC2[B2+i])
// Check whether bits [B, B+W) of cell RC are all known-zero.
// (The per-bit test and returns are elided from this excerpt.)
319 bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC,
320 uint16_t B, uint16_t W) {
321 assert(B < RC.width() && B+W <= RC.width());
322 for (uint16_t i = B; i < B+W; ++i)
// If bits [B, B+W) of RC form a known constant, store it in U and return
// true. Iterates MSB-first (from B+W-1 down to B), presumably shifting the
// accumulator as it goes — the accumulation lines are elided here.
328 bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC,
329 uint16_t B, uint16_t W, uint64_t &U) {
330 assert(B < RC.width() && B+W <= RC.width());
332 for (uint16_t i = B+W; i > B; --i) {
333 const BitTracker::BitValue &BV = RC[i-1];
// Replace every use of OldR with NewR. Both must be virtual registers.
// NextI is captured before the operand is rewritten because changing the
// register invalidates the current use iterator.
344 bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR,
345 MachineRegisterInfo &MRI) {
346 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
347 !TargetRegisterInfo::isVirtualRegister(NewR))
349 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
351 for (auto I = Begin; I != End; I = NextI) {
352 NextI = std::next(I);
// Replace uses of OldR with subregister NewR:NewSR. Bails out when OldR has
// a tied use that would be broken by introducing the subregister (gated by
// the PreserveTiedOps option via hasTiedUse).
358 bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR,
359 unsigned NewSR, MachineRegisterInfo &MRI) {
360 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
361 !TargetRegisterInfo::isVirtualRegister(NewR))
363 if (hasTiedUse(OldR, MRI, NewSR))
365 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
367 for (auto I = Begin; I != End; I = NextI) {
368 NextI = std::next(I);
// Replace uses of OldR:OldSR with NewR:NewSR. Only uses that reference
// exactly subregister OldSR are rewritten; the tied-use check is skipped
// when the subregister index does not change.
375 bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR,
376 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) {
377 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
378 !TargetRegisterInfo::isVirtualRegister(NewR))
380 if (OldSR != NewSR && hasTiedUse(OldR, MRI, NewSR))
382 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
384 for (auto I = Begin; I != End; I = NextI) {
385 NextI = std::next(I);
386 if (I->getSubReg() != OldSR)
394 // For a register ref (pair Reg:Sub), set Begin to the position of the LSB
395 // of Sub in Reg, and set Width to the size of Sub in bits. Return true,
396 // if this succeeded, otherwise return false.
397 bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
398 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) {
399 const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg);
// Default: the whole register (RC->getSize() is in bytes).
402 Width = RC->getSize()*8;
// Only the double (64-bit / vector-pair) classes have lo/hi subregisters;
// for those, each half is Width/2 and the hi half starts at Width/2
// (the Begin adjustment line is elided from this excerpt).
408 switch (RC->getID()) {
409 case Hexagon::DoubleRegsRegClassID:
410 case Hexagon::VecDblRegsRegClassID:
411 case Hexagon::VecDblRegs128BRegClassID:
412 Width = RC->getSize()*8 / 2;
413 if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi)
423 // For a REG_SEQUENCE, set SL to the low subregister and SH to the high
// subregister, regardless of the operand order in the instruction.
425 bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I,
426 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
427 const MachineRegisterInfo &MRI) {
428 assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE);
// REG_SEQUENCE operand layout: (dst, src1, subidx1, src2, subidx2).
429 unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm();
430 auto *DstRC = MRI.getRegClass(I.getOperand(0).getReg());
431 auto &HRI = static_cast<const HexagonRegisterInfo&>(
432 *MRI.getTargetRegisterInfo());
// Resolve the class-specific lo/hi subregister indices (isub_* or vsub_*).
433 unsigned SubLo = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_lo);
434 unsigned SubHi = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_hi);
435 assert((Sub1 == SubLo && Sub2 == SubHi) || (Sub1 == SubHi && Sub2 == SubLo));
436 if (Sub1 == SubLo && Sub2 == SubHi) {
437 SL = I.getOperand(1);
438 SH = I.getOperand(3);
441 if (Sub1 == SubHi && Sub2 == SubLo) {
442 SH = I.getOperand(1);
443 SL = I.getOperand(3);
449 // All stores (except 64-bit stores) take a 32-bit register as the source
450 // of the value to be stored. If the instruction stores into a location
451 // that is shorter than 32 bits, some bits of the source register are not
452 // used. For each store instruction, calculate the set of used bits in
453 // the source register, and set appropriate bits in Bits. Return true if
454 // the bits are calculated, false otherwise.
// Three visible groups below: byte stores use bits [Begin, Begin+8), half-
// word stores use [Begin, Begin+16), and the .H-form ("storerf") stores use
// the upper half [Begin+16, Begin+32).
455 bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
457 using namespace Hexagon;
// --- Byte stores: only the low 8 bits of the source register matter. ---
461 case S2_storerb_io: // memb(Rs32+#s11:0)=Rt32
462 case S2_storerbnew_io: // memb(Rs32+#s11:0)=Nt8.new
463 case S2_pstorerbt_io: // if (Pv4) memb(Rs32+#u6:0)=Rt32
464 case S2_pstorerbf_io: // if (!Pv4) memb(Rs32+#u6:0)=Rt32
465 case S4_pstorerbtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
466 case S4_pstorerbfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
467 case S2_pstorerbnewt_io: // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
468 case S2_pstorerbnewf_io: // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
469 case S4_pstorerbnewtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
470 case S4_pstorerbnewfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
471 case S2_storerb_pi: // memb(Rx32++#s4:0)=Rt32
472 case S2_storerbnew_pi: // memb(Rx32++#s4:0)=Nt8.new
473 case S2_pstorerbt_pi: // if (Pv4) memb(Rx32++#s4:0)=Rt32
474 case S2_pstorerbf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Rt32
475 case S2_pstorerbtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
476 case S2_pstorerbfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
477 case S2_pstorerbnewt_pi: // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
478 case S2_pstorerbnewf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
479 case S2_pstorerbnewtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
480 case S2_pstorerbnewfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
481 case S4_storerb_ap: // memb(Re32=#U6)=Rt32
482 case S4_storerbnew_ap: // memb(Re32=#U6)=Nt8.new
483 case S2_storerb_pr: // memb(Rx32++Mu2)=Rt32
484 case S2_storerbnew_pr: // memb(Rx32++Mu2)=Nt8.new
485 case S4_storerb_ur: // memb(Ru32<<#u2+#U6)=Rt32
486 case S4_storerbnew_ur: // memb(Ru32<<#u2+#U6)=Nt8.new
487 case S2_storerb_pbr: // memb(Rx32++Mu2:brev)=Rt32
488 case S2_storerbnew_pbr: // memb(Rx32++Mu2:brev)=Nt8.new
489 case S2_storerb_pci: // memb(Rx32++#s4:0:circ(Mu2))=Rt32
490 case S2_storerbnew_pci: // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
491 case S2_storerb_pcr: // memb(Rx32++I:circ(Mu2))=Rt32
492 case S2_storerbnew_pcr: // memb(Rx32++I:circ(Mu2))=Nt8.new
493 case S4_storerb_rr: // memb(Rs32+Ru32<<#u2)=Rt32
494 case S4_storerbnew_rr: // memb(Rs32+Ru32<<#u2)=Nt8.new
495 case S4_pstorerbt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
496 case S4_pstorerbf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
497 case S4_pstorerbtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
498 case S4_pstorerbfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
499 case S4_pstorerbnewt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
500 case S4_pstorerbnewf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
501 case S4_pstorerbnewtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
502 case S4_pstorerbnewfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
503 case S2_storerbgp: // memb(gp+#u16:0)=Rt32
504 case S2_storerbnewgp: // memb(gp+#u16:0)=Nt8.new
505 case S4_pstorerbt_abs: // if (Pv4) memb(#u6)=Rt32
506 case S4_pstorerbf_abs: // if (!Pv4) memb(#u6)=Rt32
507 case S4_pstorerbtnew_abs: // if (Pv4.new) memb(#u6)=Rt32
508 case S4_pstorerbfnew_abs: // if (!Pv4.new) memb(#u6)=Rt32
509 case S4_pstorerbnewt_abs: // if (Pv4) memb(#u6)=Nt8.new
510 case S4_pstorerbnewf_abs: // if (!Pv4) memb(#u6)=Nt8.new
511 case S4_pstorerbnewtnew_abs: // if (Pv4.new) memb(#u6)=Nt8.new
512 case S4_pstorerbnewfnew_abs: // if (!Pv4.new) memb(#u6)=Nt8.new
513 Bits.set(Begin, Begin+8);
// --- Halfword stores: the low 16 bits of the source register matter. ---
517 case S2_storerh_io: // memh(Rs32+#s11:1)=Rt32
518 case S2_storerhnew_io: // memh(Rs32+#s11:1)=Nt8.new
519 case S2_pstorerht_io: // if (Pv4) memh(Rs32+#u6:1)=Rt32
520 case S2_pstorerhf_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt32
521 case S4_pstorerhtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
522 case S4_pstorerhfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
523 case S2_pstorerhnewt_io: // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
524 case S2_pstorerhnewf_io: // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
525 case S4_pstorerhnewtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
526 case S4_pstorerhnewfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
527 case S2_storerh_pi: // memh(Rx32++#s4:1)=Rt32
528 case S2_storerhnew_pi: // memh(Rx32++#s4:1)=Nt8.new
529 case S2_pstorerht_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt32
530 case S2_pstorerhf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt32
531 case S2_pstorerhtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
532 case S2_pstorerhfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
533 case S2_pstorerhnewt_pi: // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
534 case S2_pstorerhnewf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
535 case S2_pstorerhnewtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
536 case S2_pstorerhnewfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
537 case S4_storerh_ap: // memh(Re32=#U6)=Rt32
538 case S4_storerhnew_ap: // memh(Re32=#U6)=Nt8.new
539 case S2_storerh_pr: // memh(Rx32++Mu2)=Rt32
540 case S2_storerhnew_pr: // memh(Rx32++Mu2)=Nt8.new
541 case S4_storerh_ur: // memh(Ru32<<#u2+#U6)=Rt32
542 case S4_storerhnew_ur: // memh(Ru32<<#u2+#U6)=Nt8.new
543 case S2_storerh_pbr: // memh(Rx32++Mu2:brev)=Rt32
544 case S2_storerhnew_pbr: // memh(Rx32++Mu2:brev)=Nt8.new
545 case S2_storerh_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt32
546 case S2_storerhnew_pci: // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
547 case S2_storerh_pcr: // memh(Rx32++I:circ(Mu2))=Rt32
548 case S2_storerhnew_pcr: // memh(Rx32++I:circ(Mu2))=Nt8.new
549 case S4_storerh_rr: // memh(Rs32+Ru32<<#u2)=Rt32
550 case S4_pstorerht_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
551 case S4_pstorerhf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
552 case S4_pstorerhtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
553 case S4_pstorerhfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
554 case S4_storerhnew_rr: // memh(Rs32+Ru32<<#u2)=Nt8.new
555 case S4_pstorerhnewt_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
556 case S4_pstorerhnewf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
557 case S4_pstorerhnewtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
558 case S4_pstorerhnewfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
559 case S2_storerhgp: // memh(gp+#u16:1)=Rt32
560 case S2_storerhnewgp: // memh(gp+#u16:1)=Nt8.new
561 case S4_pstorerht_abs: // if (Pv4) memh(#u6)=Rt32
562 case S4_pstorerhf_abs: // if (!Pv4) memh(#u6)=Rt32
563 case S4_pstorerhtnew_abs: // if (Pv4.new) memh(#u6)=Rt32
564 case S4_pstorerhfnew_abs: // if (!Pv4.new) memh(#u6)=Rt32
565 case S4_pstorerhnewt_abs: // if (Pv4) memh(#u6)=Nt8.new
566 case S4_pstorerhnewf_abs: // if (!Pv4) memh(#u6)=Nt8.new
567 case S4_pstorerhnewtnew_abs: // if (Pv4.new) memh(#u6)=Nt8.new
568 case S4_pstorerhnewfnew_abs: // if (!Pv4.new) memh(#u6)=Nt8.new
569 Bits.set(Begin, Begin+16);
// --- "storerf" (Rt.H32) stores: the HIGH halfword of the source is used. ---
573 case S2_storerf_io: // memh(Rs32+#s11:1)=Rt.H32
574 case S2_pstorerft_io: // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
575 case S2_pstorerff_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
576 case S4_pstorerftnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
577 case S4_pstorerffnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
578 case S2_storerf_pi: // memh(Rx32++#s4:1)=Rt.H32
579 case S2_pstorerft_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
580 case S2_pstorerff_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
581 case S2_pstorerftnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
582 case S2_pstorerffnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
583 case S4_storerf_ap: // memh(Re32=#U6)=Rt.H32
584 case S2_storerf_pr: // memh(Rx32++Mu2)=Rt.H32
585 case S4_storerf_ur: // memh(Ru32<<#u2+#U6)=Rt.H32
586 case S2_storerf_pbr: // memh(Rx32++Mu2:brev)=Rt.H32
587 case S2_storerf_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
588 case S2_storerf_pcr: // memh(Rx32++I:circ(Mu2))=Rt.H32
589 case S4_storerf_rr: // memh(Rs32+Ru32<<#u2)=Rt.H32
590 case S4_pstorerft_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
591 case S4_pstorerff_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
592 case S4_pstorerftnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
593 case S4_pstorerffnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
594 case S2_storerfgp: // memh(gp+#u16:1)=Rt.H32
595 case S4_pstorerft_abs: // if (Pv4) memh(#u6)=Rt.H32
596 case S4_pstorerff_abs: // if (!Pv4) memh(#u6)=Rt.H32
597 case S4_pstorerftnew_abs: // if (Pv4.new) memh(#u6)=Rt.H32
598 case S4_pstorerffnew_abs: // if (!Pv4.new) memh(#u6)=Rt.H32
599 Bits.set(Begin+16, Begin+32);
606 // For an instruction with opcode Opc, calculate the set of bits that it
607 // uses in a register in operand OpN. This only calculates the set of used
608 // bits for cases where it does not depend on any operands (as is the case
609 // in shifts, for example). For concrete instructions from a program, the
610 // operand may be a subregister of a larger register, while Bits would
611 // correspond to the larger register in its entirety. Because of that,
612 // the parameter Begin can be used to indicate which bit of Bits should be
613 // considered the LSB of of the operand.
614 bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
615 BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
616 using namespace Hexagon;
618 const MCInstrDesc &D = HII.get(Opc);
// Stores keep the source value in the last operand; delegate to the
// store-specific table above.
620 if (OpN == D.getNumOperands()-1)
621 return getUsedBitsInStore(Opc, Bits, Begin);
// The groups below are keyed by which halves (L = bits 0-15, H = 16-31)
// of the source register(s) the instruction reads; opcode lists for
// several groups are partially elided in this excerpt.
626 // One register source. Used bits: R1[0-7].
633 Bits.set(Begin, Begin+8);
638 // One register source. Used bits: R1[0-15].
646 Bits.set(Begin, Begin+16);
651 // One register source. Used bits: R1[16-31].
654 Bits.set(Begin+16, Begin+32);
659 // Two register sources. Used bits: R1[0-7], R2[0-7].
664 Bits.set(Begin, Begin+8);
669 // Two register sources. Used bits: R1[0-15], R2[0-15].
674 case A2_addh_h16_sat_ll:
676 case A2_addh_l16_sat_ll:
679 case A2_subh_h16_sat_ll:
681 case A2_subh_l16_sat_ll:
682 case M2_mpy_acc_ll_s0:
683 case M2_mpy_acc_ll_s1:
684 case M2_mpy_acc_sat_ll_s0:
685 case M2_mpy_acc_sat_ll_s1:
688 case M2_mpy_nac_ll_s0:
689 case M2_mpy_nac_ll_s1:
690 case M2_mpy_nac_sat_ll_s0:
691 case M2_mpy_nac_sat_ll_s1:
692 case M2_mpy_rnd_ll_s0:
693 case M2_mpy_rnd_ll_s1:
694 case M2_mpy_sat_ll_s0:
695 case M2_mpy_sat_ll_s1:
696 case M2_mpy_sat_rnd_ll_s0:
697 case M2_mpy_sat_rnd_ll_s1:
698 case M2_mpyd_acc_ll_s0:
699 case M2_mpyd_acc_ll_s1:
702 case M2_mpyd_nac_ll_s0:
703 case M2_mpyd_nac_ll_s1:
704 case M2_mpyd_rnd_ll_s0:
705 case M2_mpyd_rnd_ll_s1:
706 case M2_mpyu_acc_ll_s0:
707 case M2_mpyu_acc_ll_s1:
710 case M2_mpyu_nac_ll_s0:
711 case M2_mpyu_nac_ll_s1:
712 case M2_mpyud_acc_ll_s0:
713 case M2_mpyud_acc_ll_s1:
716 case M2_mpyud_nac_ll_s0:
717 case M2_mpyud_nac_ll_s1:
// Both source operands read only their low halves.
718 if (OpN == 1 || OpN == 2) {
719 Bits.set(Begin, Begin+16);
724 // Two register sources. Used bits: R1[0-15], R2[16-31].
726 case A2_addh_h16_sat_lh:
729 case A2_subh_h16_sat_lh:
730 case M2_mpy_acc_lh_s0:
731 case M2_mpy_acc_lh_s1:
732 case M2_mpy_acc_sat_lh_s0:
733 case M2_mpy_acc_sat_lh_s1:
736 case M2_mpy_nac_lh_s0:
737 case M2_mpy_nac_lh_s1:
738 case M2_mpy_nac_sat_lh_s0:
739 case M2_mpy_nac_sat_lh_s1:
740 case M2_mpy_rnd_lh_s0:
741 case M2_mpy_rnd_lh_s1:
742 case M2_mpy_sat_lh_s0:
743 case M2_mpy_sat_lh_s1:
744 case M2_mpy_sat_rnd_lh_s0:
745 case M2_mpy_sat_rnd_lh_s1:
746 case M2_mpyd_acc_lh_s0:
747 case M2_mpyd_acc_lh_s1:
750 case M2_mpyd_nac_lh_s0:
751 case M2_mpyd_nac_lh_s1:
752 case M2_mpyd_rnd_lh_s0:
753 case M2_mpyd_rnd_lh_s1:
754 case M2_mpyu_acc_lh_s0:
755 case M2_mpyu_acc_lh_s1:
758 case M2_mpyu_nac_lh_s0:
759 case M2_mpyu_nac_lh_s1:
760 case M2_mpyud_acc_lh_s0:
761 case M2_mpyud_acc_lh_s1:
764 case M2_mpyud_nac_lh_s0:
765 case M2_mpyud_nac_lh_s1:
766 // These four are actually LH.
768 case A2_addh_l16_sat_hl:
770 case A2_subh_l16_sat_hl:
// Operand 1 reads the low half, operand 2 the high half.
772 Bits.set(Begin, Begin+16);
776 Bits.set(Begin+16, Begin+32);
781 // Two register sources, used bits: R1[16-31], R2[0-15].
783 case A2_addh_h16_sat_hl:
786 case A2_subh_h16_sat_hl:
787 case M2_mpy_acc_hl_s0:
788 case M2_mpy_acc_hl_s1:
789 case M2_mpy_acc_sat_hl_s0:
790 case M2_mpy_acc_sat_hl_s1:
793 case M2_mpy_nac_hl_s0:
794 case M2_mpy_nac_hl_s1:
795 case M2_mpy_nac_sat_hl_s0:
796 case M2_mpy_nac_sat_hl_s1:
797 case M2_mpy_rnd_hl_s0:
798 case M2_mpy_rnd_hl_s1:
799 case M2_mpy_sat_hl_s0:
800 case M2_mpy_sat_hl_s1:
801 case M2_mpy_sat_rnd_hl_s0:
802 case M2_mpy_sat_rnd_hl_s1:
803 case M2_mpyd_acc_hl_s0:
804 case M2_mpyd_acc_hl_s1:
807 case M2_mpyd_nac_hl_s0:
808 case M2_mpyd_nac_hl_s1:
809 case M2_mpyd_rnd_hl_s0:
810 case M2_mpyd_rnd_hl_s1:
811 case M2_mpyu_acc_hl_s0:
812 case M2_mpyu_acc_hl_s1:
815 case M2_mpyu_nac_hl_s0:
816 case M2_mpyu_nac_hl_s1:
817 case M2_mpyud_acc_hl_s0:
818 case M2_mpyud_acc_hl_s1:
821 case M2_mpyud_nac_hl_s0:
822 case M2_mpyud_nac_hl_s1:
// Operand 1 reads the high half, operand 2 the low half.
824 Bits.set(Begin+16, Begin+32);
828 Bits.set(Begin, Begin+16);
833 // Two register sources, used bits: R1[16-31], R2[16-31].
835 case A2_addh_h16_sat_hh:
838 case A2_subh_h16_sat_hh:
839 case M2_mpy_acc_hh_s0:
840 case M2_mpy_acc_hh_s1:
841 case M2_mpy_acc_sat_hh_s0:
842 case M2_mpy_acc_sat_hh_s1:
845 case M2_mpy_nac_hh_s0:
846 case M2_mpy_nac_hh_s1:
847 case M2_mpy_nac_sat_hh_s0:
848 case M2_mpy_nac_sat_hh_s1:
849 case M2_mpy_rnd_hh_s0:
850 case M2_mpy_rnd_hh_s1:
851 case M2_mpy_sat_hh_s0:
852 case M2_mpy_sat_hh_s1:
853 case M2_mpy_sat_rnd_hh_s0:
854 case M2_mpy_sat_rnd_hh_s1:
855 case M2_mpyd_acc_hh_s0:
856 case M2_mpyd_acc_hh_s1:
859 case M2_mpyd_nac_hh_s0:
860 case M2_mpyd_nac_hh_s1:
861 case M2_mpyd_rnd_hh_s0:
862 case M2_mpyd_rnd_hh_s1:
863 case M2_mpyu_acc_hh_s0:
864 case M2_mpyu_acc_hh_s1:
867 case M2_mpyu_nac_hh_s0:
868 case M2_mpyu_nac_hh_s1:
869 case M2_mpyud_acc_hh_s0:
870 case M2_mpyud_acc_hh_s1:
873 case M2_mpyud_nac_hh_s0:
874 case M2_mpyud_nac_hh_s1:
// Both source operands read only their high halves.
875 if (OpN == 1 || OpN == 2) {
876 Bits.set(Begin+16, Begin+32);
885 // Calculate the register class that matches Reg:Sub. For example, if
886 // vreg1 is a double register, then vreg1:isub_hi would match the "int"
// register class. Returns nullptr for non-virtual registers (and,
// presumably, for class/subreg combinations not listed below — the
// fall-through lines are elided in this excerpt).
888 const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
889 const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
890 if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
892 auto *RC = MRI.getRegClass(RR.Reg);
895 auto &HRI = static_cast<const HexagonRegisterInfo&>(
896 *MRI.getTargetRegisterInfo());
// Debug-build sanity check: the subregister must be the class's lo or hi.
898 auto VerifySR = [&HRI] (const TargetRegisterClass *RC, unsigned Sub) -> void {
899 assert(Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo) ||
900 Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi));
// Each "double" class maps to its single-width element class.
903 switch (RC->getID()) {
904 case Hexagon::DoubleRegsRegClassID:
905 VerifySR(RC, RR.Sub);
906 return &Hexagon::IntRegsRegClass;
907 case Hexagon::VecDblRegsRegClassID:
908 VerifySR(RC, RR.Sub);
909 return &Hexagon::VectorRegsRegClass;
910 case Hexagon::VecDblRegs128BRegClassID:
911 VerifySR(RC, RR.Sub);
912 return &Hexagon::VectorRegs128BRegClass;
917 // Check if RD could be replaced with RS at any possible use of RD.
918 // For example a predicate register cannot be replaced with a integer
919 // register, but a 64-bit register with a subregister can be replaced
920 // with a 32-bit register.
// Transparent <=> both refs resolve to the same final register class.
921 bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
922 const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
923 if (!TargetRegisterInfo::isVirtualRegister(RD.Reg) ||
924 !TargetRegisterInfo::isVirtualRegister(RS.Reg))
926 // Return false if one (or both) classes are nullptr.
927 auto *DRC = getFinalVRegClass(RD, MRI);
931 return DRC == getFinalVRegClass(RS, MRI);
// True if Reg has a tied use whose subregister differs from NewSub; used by
// the replace* helpers to avoid rewriting tied operands. Short-circuits
// when the -hexbit-keep-tied option is disabled (early-return line elided).
934 bool HexagonBitSimplify::hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
936 if (!PreserveTiedOps)
938 return llvm::any_of(MRI.use_operands(Reg),
939 [NewSub] (const MachineOperand &Op) -> bool {
940 return Op.getSubReg() != NewSub && Op.isTied();
// Removes trivially dead instructions by walking the dominator tree
// bottom-up (see runOnNode below); run() starts at the tree root.
946 class DeadCodeElimination {
948 DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
949 : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
950 MDT(mdt), MRI(mf.getRegInfo()) {}
953 return runOnNode(MDT.getRootNode());
957 bool isDead(unsigned R) const;
958 bool runOnNode(MachineDomTreeNode *N);
961 const HexagonInstrInfo &HII;
962 MachineDominatorTree &MDT;
963 MachineRegisterInfo &MRI;
966 } // end anonymous namespace
// Check if register R has no "real" uses: debug values don't count, and
// (from the visible tail) uses feeding an instruction whose own destination
// is inspected suggest copies/PHIs are followed — the recursion/return
// lines are elided in this excerpt.
968 bool DeadCodeElimination::isDead(unsigned R) const {
969 for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
970 MachineInstr *UseI = I->getParent();
971 if (UseI->isDebugValue())
974 assert(!UseI->getOperand(0).getSubReg());
975 unsigned DR = UseI->getOperand(0).getReg();
// Post-order DCE over the dominator tree: children first, then this block's
// instructions in reverse order (so later dead users are removed before
// their feeders are examined).
984 bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
985 bool Changed = false;
986 typedef GraphTraits<MachineDomTreeNode*> GTN;
987 for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I)
988 Changed |= runOnNode(*I);
// Snapshot the instructions bottom-up; erasing while iterating the block
// directly would invalidate the iterators.
990 MachineBasicBlock *B = N->getBlock();
991 std::vector<MachineInstr*> Instrs;
992 for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
993 Instrs.push_back(&*I);
995 for (auto MI : Instrs) {
996 unsigned Opc = MI->getOpcode();
997 // Do not touch lifetime markers. This is why the target-independent DCE
999 if (Opc == TargetOpcode::LIFETIME_START ||
1000 Opc == TargetOpcode::LIFETIME_END)
// Inline asm may have side effects not visible in its operands.
1003 if (MI->isInlineAsm())
1005 // Delete PHIs if possible.
1006 if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
// An instruction is removable only if every def it produces is dead.
1009 bool AllDead = true;
1010 SmallVector<unsigned,2> Regs;
1011 for (auto &Op : MI->operands()) {
1012 if (!Op.isReg() || !Op.isDef())
1014 unsigned R = Op.getReg();
1015 if (!TargetRegisterInfo::isVirtualRegister(R) || !isDead(R)) {
// Any DBG_VALUEs referring to the removed defs become undef.
1025 for (unsigned i = 0, n = Regs.size(); i != n; ++i)
1026 MRI.markUsesInDebugValueAsUndef(Regs[i]);
1035 // Eliminate redundant instructions
1037 // This transformation will identify instructions where the output register
1038 // is the same as one of its input registers. This only works on instructions
1039 // that define a single register (unlike post-increment loads, for example).
1040 // The equality check is actually more detailed: the code calculates which
1041 // bits of the output are used, and only compares these bits with the input
1043 // If the output matches an input, the instruction is replaced with COPY.
1044 // The copies will be removed by another transformation.
// Top-down Transformation (TD = true in the base-class ctor).
1045 class RedundantInstrElimination : public Transformation {
1047 RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
1048 MachineRegisterInfo &mri)
1049 : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
1051 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// Helpers: detect shifts that discard bits, and compute which bits of a
// register (or of an instruction operand) are actually consumed.
1054 bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
1055 unsigned &LostB, unsigned &LostE);
1056 bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
1057 unsigned &LostB, unsigned &LostE);
1058 bool computeUsedBits(unsigned Reg, BitVector &Bits);
1059 bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
1061 bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);
1063 const HexagonInstrInfo &HII;
1064 MachineRegisterInfo &MRI;
1068 } // end anonymous namespace
1070 // Check if the instruction is a lossy shift left, where the input being
1071 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
1072 // of bit indices that are lost.
// The opcode groups set ImN (index of the immediate shift amount), RegN
// (index of the shifted operand) and Width (operand width in bits); those
// assignment lines are elided in this excerpt, as is the final computation
// of [LostB, LostE) from the shift amount S.
1073 bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI,
1074 unsigned OpN, unsigned &LostB, unsigned &LostE) {
1075 using namespace Hexagon;
1077 unsigned Opc = MI.getOpcode();
1078 unsigned ImN, RegN, Width;
// 64-bit (pair) left shifts.
1085 case S2_asl_i_p_acc:
1086 case S2_asl_i_p_and:
1087 case S2_asl_i_p_nac:
1089 case S2_asl_i_p_xacc:
// 32-bit left shifts and shift-with-op forms.
1099 case S2_addasl_rrri:
1100 case S4_andi_asl_ri:
1102 case S4_addi_asl_ri:
1103 case S4_subi_asl_ri:
1104 case S2_asl_i_r_acc:
1105 case S2_asl_i_r_and:
1106 case S2_asl_i_r_nac:
1108 case S2_asl_i_r_sat:
1109 case S2_asl_i_r_xacc:
1121 assert(MI.getOperand(ImN).isImm());
1122 unsigned S = MI.getOperand(ImN).getImm();
1130 // Check if the instruction is a lossy shift right, where the input being
1131 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
1132 // of bit indices that are lost.
// Mirror of isLossyShiftLeft for asr/lsr forms; the ImN/RegN/Width
// assignments and the final [LostB, LostE) computation are elided here.
1133 bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
1134 unsigned OpN, unsigned &LostB, unsigned &LostE) {
1135 using namespace Hexagon;
1137 unsigned Opc = MI.getOpcode();
// 64-bit (pair) right shifts, arithmetic and logical.
1145 case S2_asr_i_p_acc:
1146 case S2_asr_i_p_and:
1147 case S2_asr_i_p_nac:
1149 case S2_lsr_i_p_acc:
1150 case S2_lsr_i_p_and:
1151 case S2_lsr_i_p_nac:
1153 case S2_lsr_i_p_xacc:
// 32-bit right shifts and shift-with-op forms.
1162 case S4_andi_lsr_ri:
1164 case S4_addi_lsr_ri:
1165 case S4_subi_lsr_ri:
1166 case S2_asr_i_r_acc:
1167 case S2_asr_i_r_and:
1168 case S2_asr_i_r_nac:
1170 case S2_lsr_i_r_acc:
1171 case S2_lsr_i_r_and:
1172 case S2_lsr_i_r_nac:
1174 case S2_lsr_i_r_xacc:
1186 assert(MI.getOperand(ImN).isImm());
1187 unsigned S = MI.getOperand(ImN).getImm();
1193 // Calculate the bit vector that corresponds to the used bits of register Reg.
1194 // The vector Bits has the same size, as the size of Reg in bits. If the cal-
1195 // culation fails (i.e. the used bits are unknown), it returns false. Other-
1196 // wise, it returns true and sets the corresponding bits in Bits.
1197 bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
1198 BitVector Used(Bits.size());
1199 RegisterSet Visited;
1200 std::vector<unsigned> Pending;
1201 Pending.push_back(Reg);
// Worklist traversal of the transitive users of Reg. PHIs and copies just
// forward the value, so their defined registers are queued for the same
// analysis; any other user contributes its used bits via the
// per-instruction computeUsedBits overload.
1203 for (unsigned i = 0; i < Pending.size(); ++i) {
1204 unsigned R = Pending[i];
1208 for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
1209 BitTracker::RegisterRef UR = *I;
// B/W: offset and width of the (sub)register actually referenced by UR.
1211 if (!HBS::getSubregMask(UR, B, W, MRI))
1213 MachineInstr &UseI = *I->getParent();
1214 if (UseI.isPHI() || UseI.isCopy()) {
1215 unsigned DefR = UseI.getOperand(0).getReg();
// Physical defs make the used bits unknown — only virtuals are followed.
1216 if (!TargetRegisterInfo::isVirtualRegister(DefR))
1218 Pending.push_back(DefR);
1220 if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
1229 // Calculate the bits used by instruction MI in a register in operand OpN.
1230 // Return true/false if the calculation succeeds/fails. If it succeeds, set
1231 // used bits in Bits. This function does not reset any bits in Bits, so
1232 // subsequent calls over different instructions will result in the union
1233 // of the used bits in all these instructions.
1234 // The register in question may be used with a sub-register, whereas Bits
1235 // holds the bits for the entire register. To keep track of that, the
1236 // argument Begin indicates where in Bits is the lowest-significant bit
1237 // of the register used in operand OpN. For example, in instruction:
1238 // vreg1 = S2_lsr_i_r vreg2:isub_hi, 10
1239 // the operand 1 is a 32-bit register, which happens to be a subregister
1240 // of the 64-bit register vreg2, and that subregister starts at position 32.
1241 // In this case Begin=32, since Bits[32] would be the lowest-significant bit
1242 // of vreg2:isub_hi.
1243 bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
1244 unsigned OpN, BitVector &Bits, uint16_t Begin) {
1245 unsigned Opc = MI.getOpcode();
1246 BitVector T(Bits.size());
1247 bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
1248 // Even if we don't have bits yet, we could still provide some information
1249 // if the instruction is a lossy shift: the lost bits will be marked as
// not used (the continuation of this comment is elided in this listing).
1252 if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
1253 assert(MI.getOperand(OpN).isReg());
1254 BitTracker::RegisterRef RR = MI.getOperand(OpN);
1255 const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
1256 uint16_t Width = RC->getSize()*8;
// Mark the whole operand as used, then clear the range lost by the shift.
1259 T.set(Begin, Begin+Width);
1260 assert(LB <= LE && LB < Width && LE <= Width);
1261 T.reset(Begin+LB, Begin+LE);
1269 // Calculates the used bits in RD ("defined register"), and checks if these
1270 // bits in RS ("used register") and RD are identical.
1271 bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD,
1272 BitTracker::RegisterRef RS) {
1273 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
1274 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
// DB/DW and SB/SW: subregister offset and width for RD and RS respectively.
1277 if (!HBS::getSubregMask(RD, DB, DW, MRI))
1280 if (!HBS::getSubregMask(RS, SB, SW, MRI))
1285 BitVector Used(DC.width());
1286 if (!computeUsedBits(RD.Reg, Used))
// Compare only the bit positions that are actually consumed downstream.
1289 for (unsigned i = 0; i != DW; ++i)
1290 if (Used[i+DB] && DC[DB+i] != SC[SB+i])
// Scan B for instructions whose defined value equals one of their source
// operands (exactly, or on the used bits only), and replace each such
// instruction's result with a COPY of that source.
1295 bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
1296 const RegisterSet&) {
1297 if (!BT.reached(&B))
1299 bool Changed = false;
1301 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) {
1302 NextI = std::next(I);
1303 MachineInstr *MI = &*I;
// Existing copies, side-effecting instructions and inline asm are skipped.
1305 if (MI->getOpcode() == TargetOpcode::COPY)
1307 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
1309 unsigned NumD = MI->getDesc().getNumDefs();
1313 BitTracker::RegisterRef RD = MI->getOperand(0);
1314 if (!BT.has(RD.Reg))
1316 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
// PHIs must not be followed by non-PHI code, so insert after the PHI group.
1317 auto At = MI->isPHI() ? B.getFirstNonPHI()
1318 : MachineBasicBlock::iterator(MI);
1320 // Find a source operand that is equal to the result.
1321 for (auto &Op : MI->uses()) {
1324 BitTracker::RegisterRef RS = Op;
1325 if (!BT.has(RS.Reg))
1327 if (!HBS::isTransparentCopy(RD, RS, MRI))
1331 if (!HBS::getSubregMask(RS, BN, BW, MRI))
1334 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
1335 if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW))
1338 // If found, replace the instruction with a COPY.
1339 const DebugLoc &DL = MI->getDebugLoc();
1340 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
1341 unsigned NewR = MRI.createVirtualRegister(FRC);
1342 MachineInstr *CopyI =
1343 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
1344 .addReg(RS.Reg, 0, RS.Sub);
1345 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
1346 // This pass can create copies between registers that don't have the
1347 // exact same values. Updating the tracker has to involve updating
1348 // all dependent cells. Example:
1349 // vreg1 = inst vreg2 ; vreg1 != vreg2, but used bits are equal
1351 // vreg3 = copy vreg2 ; <- inserted
1352 // ... = vreg3 ; <- replaced from vreg2
1353 // Indirectly, we can create a "copy" between vreg1 and vreg2 even
1354 // though their exact values do not match.
1366 // Recognize instructions that produce constant values known at compile-time.
1367 // Replace them with register definitions that load these constants directly.
// NOTE(review): elided listing — access specifiers, the BitTracker member
// initialized as BT(bt), and the class's closing brace are not shown.
1368 class ConstGeneration : public Transformation {
1370 ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
1371 MachineRegisterInfo &mri)
1372 : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
1374 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// True if MI is already a transfer-immediate (see the opcode list below).
1375 static bool isTfrConst(const MachineInstr &MI);
// Emit a transfer-immediate of C into a new register of class RC at At.
1378 unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
1379 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);
1381 const HexagonInstrInfo &HII;
1382 MachineRegisterInfo &MRI;
1386 } // end anonymous namespace
// Return whether MI is one of the known constant-transfer opcodes; used to
// avoid regenerating constants for instructions that already are transfers.
1388 bool ConstGeneration::isTfrConst(const MachineInstr &MI) {
1389 unsigned Opc = MI.getOpcode();
// NOTE(review): the switch header and return statements are elided here;
// only the opcode labels remain visible.
1391 case Hexagon::A2_combineii:
1392 case Hexagon::A4_combineii:
1393 case Hexagon::A2_tfrsi:
1394 case Hexagon::A2_tfrpi:
1395 case Hexagon::PS_true:
1396 case Hexagon::PS_false:
1397 case Hexagon::CONST32:
1398 case Hexagon::CONST64:
1404 // Generate a transfer-immediate instruction that is appropriate for the
1405 // register class and the actual value being transferred.
1406 unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
1407 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
1408 unsigned Reg = MRI.createVirtualRegister(RC);
// 32-bit integer register: a single A2_tfrsi suffices.
1409 if (RC == &Hexagon::IntRegsRegClass) {
1410 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
1411 .addImm(int32_t(C));
// 64-bit register: prefer tfrpi, then combineii of the two halves, and
// fall back to CONST64 when neither immediate form fits.
1415 if (RC == &Hexagon::DoubleRegsRegClass) {
1417 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
1422 unsigned Lo = Lo_32(C), Hi = Hi_32(C);
1423 if (isInt<8>(Lo) || isInt<8>(Hi)) {
1424 unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
1425 : Hexagon::A4_combineii;
1426 BuildMI(B, At, DL, HII.get(Opc), Reg)
1427 .addImm(int32_t(Hi))
1428 .addImm(int32_t(Lo));
1432 BuildMI(B, At, DL, HII.get(Hexagon::CONST64), Reg)
// Predicate register: materialize all-zeros or all-ones via PS_false/PS_true.
1437 if (RC == &Hexagon::PredRegsRegClass) {
1440 Opc = Hexagon::PS_false;
1441 else if ((C & 0xFF) == 0xFF)
1442 Opc = Hexagon::PS_true;
1445 BuildMI(B, At, DL, HII.get(Opc), Reg);
// For each single-def instruction whose result is a fully-known constant
// (per the bit tracker), materialize the constant directly and replace all
// uses of the original register with the new immediate register.
1452 bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
1453 if (!BT.reached(&B))
1455 bool Changed = false;
1458 for (auto I = B.begin(), E = B.end(); I != E; ++I) {
1462 HBS::getInstrDefs(*I, Defs);
// Only handle instructions with exactly one virtual-register def.
1463 if (Defs.count() != 1)
1465 unsigned DR = Defs.find_first();
1466 if (!TargetRegisterInfo::isVirtualRegister(DR))
1469 const BitTracker::RegisterCell &DRC = BT.lookup(DR);
1470 if (HBS::getConst(DRC, 0, DRC.width(), U)) {
1472 DebugLoc DL = I->getDebugLoc();
1473 auto At = I->isPHI() ? B.getFirstNonPHI() : I;
1474 unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
1476 HBS::replaceReg(DR, ImmReg, MRI);
// Keep the tracker consistent: the new register holds the same cell.
1477 BT.put(ImmReg, DRC);
1487 // Identify pairs of available registers which hold identical values.
1488 // In such cases, only one of them needs to be calculated, the other one
1489 // will be defined as a copy of the first.
// NOTE(review): elided listing — access specifiers and the BitTracker
// member are among the missing lines.
1490 class CopyGeneration : public Transformation {
1492 CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
1493 const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
1494 : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}
1496 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// Find a register in AVs whose value matches Inp (possibly as a subreg).
1499 bool findMatch(const BitTracker::RegisterRef &Inp,
1500 BitTracker::RegisterRef &Out, const RegisterSet &AVs);
1502 const HexagonInstrInfo &HII;
1503 const HexagonRegisterInfo &HRI;
1504 MachineRegisterInfo &MRI;
// Registers already replaced by copies; never use them as match sources.
1506 RegisterSet Forbidden;
1509 // Eliminate register copies RD = RS, by replacing the uses of RD with
// RS (the rest of this comment is elided in this listing).
1511 class CopyPropagation : public Transformation {
1513 CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
1514 : Transformation(false), HRI(hri), MRI(mri) {}
1516 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// True if Opc is a copy-like opcode; NoConv restricts the accepted set
// (see the two opcode groups in the implementation below).
1518 static bool isCopyReg(unsigned Opc, bool NoConv);
1521 bool propagateRegCopy(MachineInstr &MI);
1523 const HexagonRegisterInfo &HRI;
1524 MachineRegisterInfo &MRI;
1527 } // end anonymous namespace
1529 /// Check if there is a register in AVs that is identical to Inp. If so,
1530 /// set Out to the found register. The output may be a pair Reg:Sub.
1531 bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp,
1532 BitTracker::RegisterRef &Out, const RegisterSet &AVs) {
1533 if (!BT.has(Inp.Reg))
1535 const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg);
1536 auto *FRC = HBS::getFinalVRegClass(Inp, MRI);
// B/W: bit offset and width of the (sub)register referenced by Inp.
1538 if (!HBS::getSubregMask(Inp, B, W, MRI))
// First pass: look for a whole register of the same class whose cell
// matches Inp bit-for-bit.
1541 for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) {
1542 if (!BT.has(R) || Forbidden[R])
1544 const BitTracker::RegisterCell &RC = BT.lookup(R);
1545 unsigned RW = RC.width();
1547 if (FRC != MRI.getRegClass(R))
1549 if (!HBS::isTransparentCopy(R, Inp, MRI))
1551 if (!HBS::isEqual(InpRC, B, RC, 0, W))
1557 // Check if there is a super-register, whose part (with a subregister)
1558 // is equal to the input.
1559 // Only do double registers for now.
1562 if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass)
// The low half lives at bit 0, the high half at bit W of the double reg.
1565 if (HBS::isEqual(InpRC, B, RC, 0, W))
1566 Out.Sub = Hexagon::isub_lo;
1567 else if (HBS::isEqual(InpRC, B, RC, W, W))
1568 Out.Sub = Hexagon::isub_hi;
1572 if (HBS::isTransparentCopy(Out, Inp, MRI))
// For each register defined in B, try to find an already-available register
// with the same value and define the new register as a copy (or a
// REG_SEQUENCE of two matching halves) instead.
1578 bool CopyGeneration::processBlock(MachineBasicBlock &B,
1579 const RegisterSet &AVs) {
1580 if (!BT.reached(&B))
1582 RegisterSet AVB(AVs);
1583 bool Changed = false;
// Defs of each instruction become available to the subsequent ones.
1586 for (auto I = B.begin(), E = B.end(), NextI = I; I != E;
1587 ++I, AVB.insert(Defs)) {
1588 NextI = std::next(I);
1590 HBS::getInstrDefs(*I, Defs);
// Don't generate copies for instructions that already are copies or
// constant transfers — other transformations handle those.
1592 unsigned Opc = I->getOpcode();
1593 if (CopyPropagation::isCopyReg(Opc, false) ||
1594 ConstGeneration::isTfrConst(*I))
1597 DebugLoc DL = I->getDebugLoc();
1598 auto At = I->isPHI() ? B.getFirstNonPHI() : I;
1600 for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) {
1601 BitTracker::RegisterRef MR;
1602 auto *FRC = HBS::getFinalVRegClass(R, MRI);
// Whole-register match: replace R with a copy of the match.
1604 if (findMatch(R, MR, AVB)) {
1605 unsigned NewR = MRI.createVirtualRegister(FRC);
1606 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
1607 .addReg(MR.Reg, 0, MR.Sub);
1608 BT.put(BitTracker::RegisterRef(NewR), BT.get(MR));
1609 HBS::replaceReg(R, NewR, MRI);
1610 Forbidden.insert(R);
// Otherwise, for 64-bit register classes, try matching each half
// separately and assembling them with a REG_SEQUENCE.
1614 if (FRC == &Hexagon::DoubleRegsRegClass ||
1615 FRC == &Hexagon::VecDblRegsRegClass ||
1616 FRC == &Hexagon::VecDblRegs128BRegClass) {
1617 // Try to generate REG_SEQUENCE.
1618 unsigned SubLo = HRI.getHexagonSubRegIndex(FRC, Hexagon::ps_sub_lo);
1619 unsigned SubHi = HRI.getHexagonSubRegIndex(FRC, Hexagon::ps_sub_hi);
1620 BitTracker::RegisterRef TL = { R, SubLo };
1621 BitTracker::RegisterRef TH = { R, SubHi };
1622 BitTracker::RegisterRef ML, MH;
1623 if (findMatch(TL, ML, AVB) && findMatch(TH, MH, AVB)) {
1624 auto *FRC = HBS::getFinalVRegClass(R, MRI);
1625 unsigned NewR = MRI.createVirtualRegister(FRC);
1626 BuildMI(B, At, DL, HII.get(TargetOpcode::REG_SEQUENCE), NewR)
1627 .addReg(ML.Reg, 0, ML.Sub)
1629 .addReg(MH.Reg, 0, MH.Sub)
1631 BT.put(BitTracker::RegisterRef(NewR), BT.get(R))
1632 HBS::replaceReg(R, NewR, MRI);
1633 Forbidden.insert(R);
// Classify copy-like opcodes. NOTE(review): the switch header, the return
// statements, and the role of the NoConv split between the two opcode
// groups are elided from this listing.
1642 bool CopyPropagation::isCopyReg(unsigned Opc, bool NoConv) {
1644 case TargetOpcode::COPY:
1645 case TargetOpcode::REG_SEQUENCE:
1646 case Hexagon::A4_combineir:
1647 case Hexagon::A4_combineri:
1649 case Hexagon::A2_tfr:
1650 case Hexagon::A2_tfrp:
1651 case Hexagon::A2_combinew:
1652 case Hexagon::V6_vcombine:
1653 case Hexagon::V6_vcombine_128B:
// Replace the uses of the destination of a copy-like instruction MI with
// its source(s), handling each copy form's operand layout.
1661 bool CopyPropagation::propagateRegCopy(MachineInstr &MI) {
1662 bool Changed = false;
1663 unsigned Opc = MI.getOpcode();
1664 BitTracker::RegisterRef RD = MI.getOperand(0);
1665 assert(MI.getOperand(0).getSubReg() == 0);
// Plain full-register copies.
1668 case TargetOpcode::COPY:
1669 case Hexagon::A2_tfr:
1670 case Hexagon::A2_tfrp: {
1671 BitTracker::RegisterRef RS = MI.getOperand(1);
1672 if (!HBS::isTransparentCopy(RD, RS, MRI))
// One of the two replacement forms applies depending on RS.Sub; the
// selecting condition between these branches is elided in this listing.
1675 Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI);
1677 Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI);
// REG_SEQUENCE: rewrite each half of RD separately.
1680 case TargetOpcode::REG_SEQUENCE: {
1681 BitTracker::RegisterRef SL, SH;
1682 if (HBS::parseRegSequence(MI, SL, SH, MRI)) {
1683 const TargetRegisterClass *RC = MRI.getRegClass(RD.Reg);
1684 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
1685 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
1686 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, SL.Reg, SL.Sub, MRI);
1687 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, SH.Reg, SH.Sub, MRI);
// Combines: operand 1 is the high half, operand 2 the low half.
1691 case Hexagon::A2_combinew:
1692 case Hexagon::V6_vcombine:
1693 case Hexagon::V6_vcombine_128B: {
1694 const TargetRegisterClass *RC = MRI.getRegClass(RD.Reg);
1695 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
1696 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
1697 BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2);
1698 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, RL.Reg, RL.Sub, MRI);
1699 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, RH.Reg, RH.Sub, MRI);
// Register-immediate combines: only the register half can be propagated.
1702 case Hexagon::A4_combineir:
1703 case Hexagon::A4_combineri: {
1704 unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1;
1705 unsigned Sub = (Opc == Hexagon::A4_combineir) ? Hexagon::isub_lo
1707 BitTracker::RegisterRef RS = MI.getOperand(SrcX);
1708 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI);
// Propagate all copy-like instructions in B. The block is snapshotted in
// reverse order first, so propagation does not interfere with iteration
// while the register uses are being rewritten.
1715 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) {
1716 std::vector<MachineInstr*> Instrs;
1717 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I)
1718 Instrs.push_back(&*I);
1720 bool Changed = false;
1721 for (auto I : Instrs) {
1722 unsigned Opc = I->getOpcode();
1723 if (!CopyPropagation::isCopyReg(Opc, true))
1725 Changed |= propagateRegCopy(*I);
1733 // Recognize patterns that can be simplified and replace them with the
// simplified form (the middle of this comment is elided in the listing).
1735 // This is by no means complete
// NOTE(review): elided listing — access specifiers, the BitTracker member
// and the class's closing brace are not shown.
1736 class BitSimplification : public Transformation {
1738 BitSimplification(BitTracker &bt, const HexagonInstrInfo &hii,
1739 const HexagonRegisterInfo &hri, MachineRegisterInfo &mri,
1740 MachineFunction &mf)
1741 : Transformation(true), HII(hii), HRI(hri), MRI(mri), MF(mf), BT(bt) {}
1743 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// A register reference plus which 16-bit half of it is referenced.
1746 struct RegHalf : public BitTracker::RegisterRef {
1747 bool Low; // Low/High halfword.
1750 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC,
1751 unsigned B, RegHalf &RH);
1752 bool validateReg(BitTracker::RegisterRef R, unsigned Opc, unsigned OpNum);
1754 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC,
1755 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt);
1756 unsigned getCombineOpcode(bool HLow, bool LLow);
// The gen* helpers each attempt one specific rewrite of MI; see their
// definitions below for what each one recognizes.
1758 bool genStoreUpperHalf(MachineInstr *MI);
1759 bool genStoreImmediate(MachineInstr *MI);
1760 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD,
1761 const BitTracker::RegisterCell &RC);
1762 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD,
1763 const BitTracker::RegisterCell &RC);
1764 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD,
1765 const BitTracker::RegisterCell &RC);
1766 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD,
1767 const BitTracker::RegisterCell &RC);
1768 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD,
1769 const BitTracker::RegisterCell &RC);
1771 const HexagonInstrInfo &HII;
1772 const HexagonRegisterInfo &HRI;
1773 MachineRegisterInfo &MRI;
1774 MachineFunction &MF;
1778 } // end anonymous namespace
1780 // Check if the bits [B..B+16) in register cell RC form a valid halfword,
1781 // i.e. [0..16), [16..32), etc. of some register. If so, return true and
1782 // set the information about the found register in RH.
1783 bool BitSimplification::matchHalf(unsigned SelfR,
1784 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) {
1785 // XXX This could be searching in the set of available registers, in case
1786 // the match is not exact.
1788 // Match 16-bit chunks, where the RC[B..B+15] references exactly one
1789 // register and all the bits B..B+15 match between RC and the register.
1790 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... },
1791 // and RC = { [0]:0 [1-15]:v1[1-15]... }.
// Skip leading self-referencing bits to find the first outside reference.
1794 while (I < B+16 && RC[I].num())
1799 unsigned Reg = RC[I].RefI.Reg;
1800 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B.
1803 unsigned Pos = P - (I-B);
1805 if (Reg == 0 || Reg == SelfR) // Don't match "self".
1807 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1812 const BitTracker::RegisterCell &SC = BT.lookup(Reg);
1813 if (Pos+16 > SC.width())
// Verify all 16 bits are consistent with Reg[Pos..Pos+15].
1816 for (unsigned i = 0; i < 16; ++i) {
1817 const BitTracker::BitValue &RV = RC[i+B];
1818 if (RV.Type == BitTracker::BitValue::Ref) {
1819 if (RV.RefI.Reg != Reg)
1821 if (RV.RefI.Pos != i+Pos)
1825 if (RC[i+B] != SC[i+Pos])
// Map the matched position to a halfword subregister. NOTE(review): the
// switch on Pos and the width checks between these assignments are elided.
1832 Sub = Hexagon::isub_lo;
1836 Sub = Hexagon::isub_lo;
1840 Sub = Hexagon::isub_hi;
1844 Sub = Hexagon::isub_hi;
1854 // If the subregister is not valid with the register, set it to 0.
1855 if (!HBS::getFinalVRegClass(RH, MRI))
// Check that register R is acceptable as operand OpNum of opcode Opc,
// i.e. R's final register class is a subclass of the one the instruction
// descriptor requires for that operand.
1861 bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc,
1863 auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI, MF);
1864 auto *RRC = HBS::getFinalVRegClass(R, MRI);
1865 return OpRC->hasSubClassEq(RRC);
1868 // Check if RC matches the pattern of a S2_packhl. If so, return true and
1869 // set the inputs Rs and Rt.
1870 bool BitSimplification::matchPackhl(unsigned SelfR,
1871 const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs,
1872 BitTracker::RegisterRef &Rt) {
1873 RegHalf L1, H1, L2, H2;
// The 64-bit result must decompose into four matched halfwords.
1875 if (!matchHalf(SelfR, RC, 0, L2) || !matchHalf(SelfR, RC, 16, L1))
1877 if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1))
1880 // Rs = H1.L1, Rt = H2.L2
// Each pair must be the high and low halves of the same register.
1881 if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low)
1883 if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low)
// Select the A2_combine_* opcode for combining the given halves: the first
// letter encodes the high-input half (l/h), the second the low-input half.
1891 unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) {
1892 return HLow ? LLow ? Hexagon::A2_combine_ll
1893 : Hexagon::A2_combine_lh
1894 : LLow ? Hexagon::A2_combine_hl
1895 : Hexagon::A2_combine_hh;
1898 // If MI stores the upper halfword of a register (potentially obtained via
1899 // shifts or extracts), replace it with a storerf instruction. This could
1900 // cause the "extraction" code to become dead.
1901 bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
1902 unsigned Opc = MI->getOpcode();
1903 if (Opc != Hexagon::S2_storerh_io)
// Operand 2 of S2_storerh_io is the stored value.
1906 MachineOperand &ValOp = MI->getOperand(2);
1907 BitTracker::RegisterRef RS = ValOp;
1908 if (!BT.has(RS.Reg))
1910 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
1912 if (!matchHalf(0, RC, 0, H))
// Rewrite in place: change opcode and point the value operand at the
// matched register half; the address operands are untouched.
1916 MI->setDesc(HII.get(Hexagon::S2_storerf_io));
1917 ValOp.setReg(H.Reg);
1918 ValOp.setSubReg(H.Sub);
1922 // If MI stores a value known at compile-time, and the value is within a range
1923 // that avoids using constant-extenders, replace it with a store-immediate.
1924 bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
1925 unsigned Opc = MI->getOpcode();
// NOTE(review): elided listing — the switch that derives the required
// offset alignment (Align) from the store width is only partially shown.
1928 case Hexagon::S2_storeri_io:
1930 case Hexagon::S2_storerh_io:
1932 case Hexagon::S2_storerb_io:
1938 // Avoid stores to frame-indices (due to an unknown offset).
1939 if (!MI->getOperand(0).isReg())
1941 MachineOperand &OffOp = MI->getOperand(1);
1945 int64_t Off = OffOp.getImm();
1946 // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x).
1947 if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1)))
1950 BitTracker::RegisterRef RS = MI->getOperand(2);
1951 if (!BT.has(RS.Reg))
1953 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
1955 if (!HBS::getConst(RC, 0, RC.width(), U))
1958 // Only consider 8-bit values to avoid constant-extenders.
// Range check per store width (the comparisons between cases are elided).
1961 case Hexagon::S2_storerb_io:
1964 case Hexagon::S2_storerh_io:
1967 case Hexagon::S2_storeri_io:
// Rewrite in place: drop the register value operand, switch to the
// store-immediate opcode, and append the immediate value.
1974 MI->RemoveOperand(2);
1976 case Hexagon::S2_storerb_io:
1977 MI->setDesc(HII.get(Hexagon::S4_storeirb_io));
1979 case Hexagon::S2_storerh_io:
1980 MI->setDesc(HII.get(Hexagon::S4_storeirh_io));
1982 case Hexagon::S2_storeri_io:
1983 MI->setDesc(HII.get(Hexagon::S4_storeiri_io));
1986 MI->addOperand(MachineOperand::CreateImm(V));
1990 // If MI is equivalent to S2_packhl, generate the S2_packhl. MI could be the
1991 // last instruction in a sequence that results in something equivalent to
1992 // the pack-halfwords. The intent is to cause the entire sequence to become
// dead (the end of this comment is elided in this listing).
1994 bool BitSimplification::genPackhl(MachineInstr *MI,
1995 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
1996 unsigned Opc = MI->getOpcode();
// Nothing to do if MI already is a packhl.
1997 if (Opc == Hexagon::S2_packhl)
1999 BitTracker::RegisterRef Rs, Rt;
2000 if (!matchPackhl(RD.Reg, RC, Rs, Rt))
2002 if (!validateReg(Rs, Hexagon::S2_packhl, 1) ||
2003 !validateReg(Rt, Hexagon::S2_packhl, 2))
2006 MachineBasicBlock &B = *MI->getParent();
2007 unsigned NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
2008 DebugLoc DL = MI->getDebugLoc();
2009 auto At = MI->isPHI() ? B.getFirstNonPHI()
2010 : MachineBasicBlock::iterator(MI);
2011 BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR)
2012 .addReg(Rs.Reg, 0, Rs.Sub)
2013 .addReg(Rt.Reg, 0, Rt.Sub);
2014 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
// Record the new register's value so later transformations can use it.
2015 BT.put(BitTracker::RegisterRef(NewR), RC);
2019 // If MI produces halfword of the input in the low half of the output,
2020 // replace it with zero-extend or extractu.
2021 bool BitSimplification::genExtractHalf(MachineInstr *MI,
2022 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2024 // Check for halfword in low 16 bits, zeros elsewhere.
2025 if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16))
2028 unsigned Opc = MI->getOpcode();
2029 MachineBasicBlock &B = *MI->getParent();
2030 DebugLoc DL = MI->getDebugLoc();
2032 // Prefer zxth, since zxth can go in any slot, while extractu only in
// some slots (the end of this comment is elided in this listing).
2035 auto At = MI->isPHI() ? B.getFirstNonPHI()
2036 : MachineBasicBlock::iterator(MI);
// Low half of the source: zero-extend it. High half: logical shift right.
2037 if (L.Low && Opc != Hexagon::A2_zxth) {
2038 if (validateReg(L, Hexagon::A2_zxth, 1)) {
2039 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2040 BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR)
2041 .addReg(L.Reg, 0, L.Sub);
2043 } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) {
2044 if (validateReg(L, Hexagon::S2_lsr_i_r, 1)) {
2045 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2046 BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR)
2047 .addReg(L.Reg, 0, L.Sub)
2053 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2054 BT.put(BitTracker::RegisterRef(NewR), RC);
2058 // If MI is equivalent to a combine(.L/.H, .L/.H) replace it with the
// combine instruction (the end of this comment is elided in this listing).
2060 bool BitSimplification::genCombineHalf(MachineInstr *MI,
2061 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2063 // Check for combine h/l
2064 if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H))
2066 // Do nothing if this is just a reg copy.
2067 if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low)
2070 unsigned Opc = MI->getOpcode();
2071 unsigned COpc = getCombineOpcode(H.Low, L.Low);
2074 if (!validateReg(H, COpc, 1) || !validateReg(L, COpc, 2))
2077 MachineBasicBlock &B = *MI->getParent();
2078 DebugLoc DL = MI->getDebugLoc();
2079 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2080 auto At = MI->isPHI() ? B.getFirstNonPHI()
2081 : MachineBasicBlock::iterator(MI);
2082 BuildMI(B, At, DL, HII.get(COpc), NewR)
2083 .addReg(H.Reg, 0, H.Sub)
2084 .addReg(L.Reg, 0, L.Sub);
2085 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
// Keep the tracker consistent: NewR holds the same cell as RD did.
2086 BT.put(BitTracker::RegisterRef(NewR), RC);
2090 // If MI resets high bits of a register and keeps the lower ones, replace it
2091 // with zero-extend byte/half, and-immediate, or extractu, as appropriate.
2092 bool BitSimplification::genExtractLow(MachineInstr *MI,
2093 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2094 unsigned Opc = MI->getOpcode();
// Skip instructions that already are in one of the target forms.
2096 case Hexagon::A2_zxtb:
2097 case Hexagon::A2_zxth:
2098 case Hexagon::S2_extractu:
// An and-immediate with a suitable mask is also already in target form
// (the mask test between these lines is elided in this listing).
2101 if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) {
2102 int32_t Imm = MI->getOperand(2).getImm();
2107 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
// W: number of significant low bits; the bits above W are known zero.
2109 unsigned W = RC.width();
2110 while (W > 0 && RC[W-1].is(0))
2112 if (W == 0 || W == RC.width())
2114 unsigned NewOpc = (W == 8) ? Hexagon::A2_zxtb
2115 : (W == 16) ? Hexagon::A2_zxth
2116 : (W < 10) ? Hexagon::A2_andir
2117 : Hexagon::S2_extractu;
2118 MachineBasicBlock &B = *MI->getParent();
2119 DebugLoc DL = MI->getDebugLoc();
// Look for a source operand whose low W bits equal the result's low bits.
2121 for (auto &Op : MI->uses()) {
2124 BitTracker::RegisterRef RS = Op;
2125 if (!BT.has(RS.Reg))
2127 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
2129 if (!HBS::getSubregMask(RS, BN, BW, MRI))
2131 if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W))
2133 if (!validateReg(RS, NewOpc, 1))
2136 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2137 auto At = MI->isPHI() ? B.getFirstNonPHI()
2138 : MachineBasicBlock::iterator(MI);
2139 auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR)
2140 .addReg(RS.Reg, 0, RS.Sub);
// andir takes a mask of W ones; extractu takes width W at offset 0.
2141 if (NewOpc == Hexagon::A2_andir)
2142 MIB.addImm((1 << W) - 1);
2143 else if (NewOpc == Hexagon::S2_extractu)
2144 MIB.addImm(W).addImm(0);
2145 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2146 BT.put(BitTracker::RegisterRef(NewR), RC);
2152 // Check for tstbit simplification opportunity, where the bit being checked
2153 // can be tracked back to another register. For example:
2154 // vreg2 = S2_lsr_i_r vreg1, 5
2155 // vreg3 = S2_tstbit_i vreg2, 0
// becomes:
2157 // vreg3 = S2_tstbit_i vreg1, 5
2158 bool BitSimplification::simplifyTstbit(MachineInstr *MI,
2159 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2160 unsigned Opc = MI->getOpcode();
2161 if (Opc != Hexagon::S2_tstbit_i)
2164 unsigned BN = MI->getOperand(2).getImm();
2165 BitTracker::RegisterRef RS = MI->getOperand(1);
2167 DebugLoc DL = MI->getDebugLoc();
2168 if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI))
2170 MachineBasicBlock &B = *MI->getParent();
2171 auto At = MI->isPHI() ? B.getFirstNonPHI()
2172 : MachineBasicBlock::iterator(MI);
// V is the tracked value of the tested bit (offset F into RS's cell).
2174 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
2175 const BitTracker::BitValue &V = SC[F+BN];
// Case 1: the bit comes from another register — retarget the tstbit.
2176 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) {
2177 const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg);
2178 // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is
2179 // a double register, need to use a subregister and adjust bit
// position accordingly (P stays "max" if no mapping is possible).
2181 unsigned P = std::numeric_limits<unsigned>::max();
2182 BitTracker::RegisterRef RR(V.RefI.Reg, 0);
2183 if (TC == &Hexagon::DoubleRegsRegClass) {
2185 RR.Sub = Hexagon::isub_lo;
2188 RR.Sub = Hexagon::isub_hi;
2190 } else if (TC == &Hexagon::IntRegsRegClass) {
2193 if (P != std::numeric_limits<unsigned>::max()) {
2194 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
2195 BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR)
2196 .addReg(RR.Reg, 0, RR.Sub)
2198 HBS::replaceReg(RD.Reg, NewR, MRI);
// Case 2: the bit is a known constant — fold to PS_false/PS_true.
2202 } else if (V.is(0) || V.is(1)) {
2203 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
2204 unsigned NewOpc = V.is(0) ? Hexagon::PS_false : Hexagon::PS_true;
2205 BuildMI(B, At, DL, HII.get(NewOpc), NewR);
2206 HBS::replaceReg(RD.Reg, NewR, MRI);
// Dispatch the individual simplifications over the instructions of B,
// choosing which gen*/simplify* helpers to try based on the register class
// of the single defined register.
2213 bool BitSimplification::processBlock(MachineBasicBlock &B,
2214 const RegisterSet &AVs) {
2215 if (!BT.reached(&B))
2217 bool Changed = false;
2218 RegisterSet AVB = AVs;
2221 for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) {
2222 MachineInstr *MI = &*I;
2224 HBS::getInstrDefs(*MI, Defs);
2226 unsigned Opc = MI->getOpcode();
// Copies and REG_SEQUENCEs are handled by the copy transformations.
2227 if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE)
// Stores have no register defs; try the store-specific rewrites.
2230 if (MI->mayStore()) {
2231 bool T = genStoreUpperHalf(MI);
2232 T = T || genStoreImmediate(MI);
2237 if (Defs.count() != 1)
2239 const MachineOperand &Op0 = MI->getOperand(0);
2240 if (!Op0.isReg() || !Op0.isDef())
2242 BitTracker::RegisterRef RD = Op0;
2243 if (!BT.has(RD.Reg))
2245 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
2246 const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg);
// 64-bit results: only the packhl pattern applies.
2248 if (FRC->getID() == Hexagon::DoubleRegsRegClassID) {
2249 bool T = genPackhl(MI, RD, RC);
// 32-bit results: halfword extraction/combination and low-bit extraction.
2254 if (FRC->getID() == Hexagon::IntRegsRegClassID) {
2255 bool T = genExtractHalf(MI, RD, RC);
2256 T = T || genCombineHalf(MI, RD, RC);
2257 T = T || genExtractLow(MI, RD, RC);
// Predicate results: only the tstbit simplification applies.
2262 if (FRC->getID() == Hexagon::PredRegsRegClassID) {
2263 bool T = simplifyTstbit(MI, RD, RC);
// Pass driver: run the transformations in order (constant generation,
// redundant-instruction elimination, copy generation, copy propagation,
// bit simplification), interleaved with dead-code elimination sweeps to
// remove the instructions each stage makes dead.
2271 bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
2272 if (skipFunction(*MF.getFunction()))
2275 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2276 auto &HRI = *HST.getRegisterInfo();
2277 auto &HII = *HST.getInstrInfo();
2279 MDT = &getAnalysis<MachineDominatorTree>();
2280 MachineRegisterInfo &MRI = MF.getRegInfo();
2283 Changed = DeadCodeElimination(MF, *MDT).run();
// One bit tracker instance is shared by all transformations below.
2285 const HexagonEvaluator HE(HRI, MRI, HII, MF);
2286 BitTracker BT(HE, MF);
2287 DEBUG(BT.trace(true));
2290 MachineBasicBlock &Entry = MF.front();
2292 RegisterSet AIG; // Available registers for IG.
2293 ConstGeneration ImmG(BT, HII, MRI);
2294 Changed |= visitBlock(Entry, ImmG, AIG);
2296 RegisterSet ARE; // Available registers for RIE.
2297 RedundantInstrElimination RIE(BT, HII, MRI);
2298 bool Ried = visitBlock(Entry, RIE, ARE);
2304 RegisterSet ACG; // Available registers for CG.
2305 CopyGeneration CopyG(BT, HII, HRI, MRI);
2306 Changed |= visitBlock(Entry, CopyG, ACG);
2308 RegisterSet ACP; // Available registers for CP.
2309 CopyPropagation CopyP(HRI, MRI);
2310 Changed |= visitBlock(Entry, CopyP, ACP);
2312 Changed = DeadCodeElimination(MF, *MDT).run() || Changed;
2315 RegisterSet ABS; // Available registers for BS.
2316 BitSimplification BitS(BT, HII, HRI, MRI, MF);
2317 Changed |= visitBlock(Entry, BitS, ABS);
2319 Changed = DeadCodeElimination(MF, *MDT).run() || Changed;
2325 DeadCodeElimination(MF, *MDT).run();
2330 // Recognize loops where the code at the end of the loop matches the code
2331 // before the entry of the loop, and the matching code is such that it can
2332 // be simplified. This pass relies on the bit simplification above and only
2333 // prepares code in a way that can be handled by the bit simplification.
2335 // This is the motivating testcase (and explanation):
2338 // loop0(.LBB0_2, r1) // %for.body.preheader
2339 // r5:4 = memd(r0++#8)
2342 // r3 = lsr(r4, #16)
2343 // r7:6 = combine(r5, r5)
2346 // r3 = insert(r5, #16, #16)
2347 // r7:6 = vlsrw(r7:6, #16)
2352 // memh(r2+#6) = r6 # R6 is really R5.H
2357 // memh(r2+#2) = r3 # R3 is really R4.H
2360 // r5:4 = memd(r0++#8)
2362 // { # "Shuffling" code that sets up R3 and R6
2363 // r3 = lsr(r4, #16) # so that their halves can be stored in the
2364 // r7:6 = combine(r5, r5) # next iteration. This could be folded into
2365 // } # the stores if the code was at the beginning
2366 // { # of the loop iteration. Since the same code
2367 // r3 = insert(r5, #16, #16) # precedes the loop, it can actually be moved
2368 // r7:6 = vlsrw(r7:6, #16) # there.
2375 // loop0(.LBB0_2, r1)
2376 // r5:4 = memd(r0++#8)
2381 // memh(r2+#6) = r5.h
2386 // memh(r2+#2) = r4.h
2389 // r5:4 = memd(r0++#8)
// Forward declarations of the pass factory and its registration hook, so the
// pass can be created and initialized from outside this file.
2394 FunctionPass *createHexagonLoopRescheduling();
2395 void initializeHexagonLoopReschedulingPass(PassRegistry&);
2397 } // end namespace llvm
// Machine function pass that hoists "bit shuffling" code from the bottom of
// a loop to its top (and a matching copy into the preheader), so the bit
// simplification pass above can fold it into the loop body's other
// instructions. See the motivating example in the comment block preceding
// this class.
2401 class HexagonLoopRescheduling : public MachineFunctionPass {
2405 HexagonLoopRescheduling() : MachineFunctionPass(ID),
2406 HII(nullptr), HRI(nullptr), MRI(nullptr), BTP(nullptr) {
2407 initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry());
2410 bool runOnMachineFunction(MachineFunction &MF) override;
// Target hooks and register info, set up in runOnMachineFunction.
2413 const HexagonInstrInfo *HII;
2414 const HexagonRegisterInfo *HRI;
2415 MachineRegisterInfo *MRI;
// A candidate loop: single-block loop LB with preheader PB and (possibly
// null) epilog EB.
2419 LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb,
2420 MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {}
2421 MachineBasicBlock *LB, *PB, *EB;
2423 typedef std::vector<MachineInstr*> InstrList;
// InstrGroup: a chain of dependent shuffling instructions with a single
// input register (Inp) and a single live output register (Out).
2425 BitTracker::RegisterRef Inp, Out;
// PhiInfo: decomposition of a PHI in the loop header into its loop-carried
// and preheader incoming values (see constructor below).
2429 PhiInfo(MachineInstr &P, MachineBasicBlock &B);
2431 BitTracker::RegisterRef LR, PR; // Loop Register, Preheader Register
2432 MachineBasicBlock *LB, *PB; // Loop Block, Preheader Block
// Helpers; see the definitions below for per-function documentation.
2435 static unsigned getDefReg(const MachineInstr *MI);
2436 bool isConst(unsigned Reg) const;
2437 bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const;
2438 bool isStoreInput(const MachineInstr *MI, unsigned DefR) const;
2439 bool isShuffleOf(unsigned OutR, unsigned InpR) const;
2440 bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2,
2441 unsigned &InpR2) const;
2442 void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB,
2443 MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR);
2444 bool processLoop(LoopCand &C);
2447 } // end anonymous namespace
// Pass identification and registration boilerplate.
2449 char HexagonLoopRescheduling::ID = 0;
2451 INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched",
2452 "Hexagon Loop Rescheduling", false, false)
// Decompose PHI instruction P, located in loop block B, into:
//   DefR - the register defined by the PHI,
//   LR   - the incoming value whose predecessor is B itself (the
//          loop-carried, back-edge value),
//   PR   - the other incoming value (from the preheader).
// PHI operands come in (value, block) pairs starting at operand 1, hence
// the stride-2 loop below.
2454 HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P,
2455 MachineBasicBlock &B) {
2456 DefR = HexagonLoopRescheduling::getDefReg(&P);
2459 for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) {
2460 const MachineOperand &OpB = P.getOperand(i+1);
// Back-edge operand: record the loop-carried register.
2461 if (OpB.getMBB() == &B) {
2462 LR = P.getOperand(i);
// Otherwise this is the preheader operand (branch structure elided here).
2466 PR = P.getOperand(i);
// Return the single register defined by MI. When MI does not define exactly
// one register, the elided early-exit presumably returns 0 — TODO confirm
// against the full source.
2470 unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) {
2472 HBS::getInstrDefs(*MI, Defs);
2473 if (Defs.count() != 1)
2475 return Defs.find_first();
// Return true iff every bit of Reg is a known constant (0 or 1) according
// to the bit tracker BTP.
2478 bool HexagonLoopRescheduling::isConst(unsigned Reg) const {
2481 const BitTracker::RegisterCell &RC = BTP->lookup(Reg);
2482 for (unsigned i = 0, w = RC.width(); i < w; ++i) {
2483 const BitTracker::BitValue &V = RC[i];
// Any bit that is neither a constant 0 nor 1 disqualifies the register.
2484 if (!V.is(0) && !V.is(1))
// Return true if MI is a "bit shuffling" instruction, i.e. one whose result
// only rearranges (or masks/merges) bits of its inputs: copies, immediate
// shifts, inserts, bitwise and/or, and the various combine forms. These are
// the instructions the rescheduler is allowed to move.
2490 bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI,
2491 unsigned DefR) const {
2492 unsigned Opc = MI->getOpcode();
2494 case TargetOpcode::COPY:
2495 case Hexagon::S2_lsr_i_r:
2496 case Hexagon::S2_asr_i_r:
2497 case Hexagon::S2_asl_i_r:
2498 case Hexagon::S2_lsr_i_p:
2499 case Hexagon::S2_asr_i_p:
2500 case Hexagon::S2_asl_i_p:
2501 case Hexagon::S2_insert:
2502 case Hexagon::A2_or:
2503 case Hexagon::A2_orp:
2504 case Hexagon::A2_and:
2505 case Hexagon::A2_andp:
2506 case Hexagon::A2_combinew:
2507 case Hexagon::A4_combineri:
2508 case Hexagon::A4_combineir:
2509 case Hexagon::A2_combineii:
2510 case Hexagon::A4_combineii:
2511 case Hexagon::A2_combine_ll:
2512 case Hexagon::A2_combine_lh:
2513 case Hexagon::A2_combine_hl:
2514 case Hexagon::A2_combine_hh:
// Return true if register InpR appears as an operand of MI. The name and
// callers indicate MI is expected to be a store and InpR the stored value;
// the store-opcode check itself is elided in this excerpt — TODO confirm.
2520 bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI,
2521 unsigned InpR) const {
2522 for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
2523 const MachineOperand &Op = MI->getOperand(i);
2526 if (Op.getReg() == InpR)
// Return true if OutR is a bit rearrangement of InpR, i.e. every bit of
// OutR that is a reference refers to a bit of InpR (non-reference bits are
// handled on lines elided here).
2532 bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const {
// Both registers must have been evaluated by the bit tracker.
2533 if (!BTP->has(OutR) || !BTP->has(InpR))
2535 const BitTracker::RegisterCell &OutC = BTP->lookup(OutR);
2536 for (unsigned i = 0, w = OutC.width(); i < w; ++i) {
2537 const BitTracker::BitValue &V = OutC[i];
2538 if (V.Type != BitTracker::BitValue::Ref)
// A reference to any register other than InpR disqualifies OutR.
2540 if (V.RefI.Reg != InpR)
// Check whether OutR2 is the same bit rearrangement of some single source
// register as OutR1 is of InpR1. Bits are compared position by position:
// wherever OutR1's bit refers to bit P of InpR1, OutR2's bit must refer to
// bit P of one consistent register (accumulated in MatchR). On success that
// source register is presumably reported through InpR2 — the final
// assignment/return is elided in this excerpt.
2546 bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1,
2547 unsigned OutR2, unsigned &InpR2) const {
2548 if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2))
2550 const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1);
2551 const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2);
2552 unsigned W = OutC1.width();
2553 unsigned MatchR = 0;
// The shuffles can only match if both outputs have the same width.
2554 if (W != OutC2.width())
2556 for (unsigned i = 0; i < W; ++i) {
2557 const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i];
2558 if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One)
2560 if (V1.Type != BitTracker::BitValue::Ref)
// Referenced bit positions must agree exactly.
2562 if (V1.RefI.Pos != V2.RefI.Pos)
2564 if (V1.RefI.Reg != InpR1)
// Reject self-references and unknown (0) registers on the OutR2 side.
2566 if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2)
// All of OutR2's referenced bits must come from one register.
2569 MatchR = V2.RefI.Reg;
2570 else if (V2.RefI.Reg != MatchR)
// Clone instruction group G to position At in loop block LB, rewiring it so
// its input comes through a newly created PHI (PhiR) that merges NewPredR
// (the preheader value) with the loop-carried value. RegMap tracks the
// mapping from original registers to their clones. Finally, all uses of the
// old phi result OldPhiR are redirected to the clone of G's output.
2577 void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
2578 MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR,
2579 unsigned NewPredR) {
2580 DenseMap<unsigned,unsigned> RegMap;
// Build the new PHI; its incoming operands are on lines elided here.
2582 const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR);
2583 unsigned PhiR = MRI->createVirtualRegister(PhiRC);
2584 BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR)
2589 RegMap.insert(std::make_pair(G.Inp.Reg, PhiR));
// G.Ins is in reverse dependency order, so clone from the back forward.
2591 for (unsigned i = G.Ins.size(); i > 0; --i) {
2592 const MachineInstr *SI = G.Ins[i-1];
2593 unsigned DR = getDefReg(SI);
2594 const TargetRegisterClass *RC = MRI->getRegClass(DR);
2595 unsigned NewDR = MRI->createVirtualRegister(RC);
2596 DebugLoc DL = SI->getDebugLoc();
2598 auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR);
2599 for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) {
2600 const MachineOperand &Op = SI->getOperand(j);
// Map each use through RegMap (registers not in the map are handled on
// elided lines), preserving the subregister index.
2607 unsigned UseR = RegMap[Op.getReg()];
2608 MIB.addReg(UseR, 0, Op.getSubReg());
2610 RegMap.insert(std::make_pair(DR, NewDR));
// Redirect all uses of the old phi result to the cloned group output.
2613 HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI);
// Attempt the rescheduling transformation on one candidate loop C:
//   1. Collect PHIs in the loop header whose value is used only by bit
//      shuffling instructions or as store input.
//   2. Walking the loop body backwards, collect movable bit-shuffling
//      instructions (ShufIns).
//   3. Partition ShufIns into dependency groups, each with one input and
//      one output register.
//   4. For each group that is a shuffle of its input and matches the
//      corresponding preheader value (or whose preheader value is the
//      constant 0), move the group to the top of the loop via moveGroup.
// Returns whether any change was made (the return itself is elided here).
2616 bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
2617 DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n");
// --- Step 1: collect qualifying PHIs. ---
2618 std::vector<PhiInfo> Phis;
2619 for (auto &I : *C.LB) {
2622 unsigned PR = getDefReg(&I);
2625 bool BadUse = false, GoodUse = false;
2626 for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) {
2627 MachineInstr *UseI = UI->getParent();
// Uses outside the loop block are handled on elided lines.
2628 if (UseI->getParent() != C.LB) {
2632 if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR))
2635 if (BadUse || !GoodUse)
2638 Phis.push_back(PhiInfo(I, *C.LB));
// Debug dump of the collected PHIs.
2642 dbgs() << "Phis: {";
2643 for (auto &I : Phis) {
2644 dbgs() << ' ' << PrintReg(I.DefR, HRI) << "=phi("
2645 << PrintReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber()
2646 << ',' << PrintReg(I.LR.Reg, HRI, I.LR.Sub) << ":b"
2647 << I.LB->getNumber() << ')';
2655 bool Changed = false;
// --- Step 2: gather movable shuffling instructions, back to front. ---
2658 // Go backwards in the block: for each bit shuffling instruction, check
2659 // if that instruction could potentially be moved to the front of the loop:
2660 // the output of the loop cannot be used in a non-shuffling instruction
2662 for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) {
2663 if (I->isTerminator())
2669 HBS::getInstrDefs(*I, Defs);
// Only single-def instructions defining a virtual register qualify.
2670 if (Defs.count() != 1)
2672 unsigned DefR = Defs.find_first();
2673 if (!TargetRegisterInfo::isVirtualRegister(DefR))
2675 if (!isBitShuffle(&*I, DefR))
2678 bool BadUse = false;
2679 for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) {
2680 MachineInstr *UseI = UI->getParent();
2681 if (UseI->getParent() == C.LB) {
2682 if (UseI->isPHI()) {
2683 // If the use is in a phi node in this loop, then it should be
2684 // the value corresponding to the back edge.
2685 unsigned Idx = UI.getOperandNo();
2686 if (UseI->getOperand(Idx+1).getMBB() != C.LB)
// Non-PHI in-loop uses must themselves be already-collected shufflers.
2689 auto F = find(ShufIns, UseI);
2690 if (F == ShufIns.end())
2694 // There is a use outside of the loop, but there is no epilog block
2695 // suitable for a copy-out.
2696 if (C.EB == nullptr)
2705 ShufIns.push_back(&*I);
// --- Step 3: partition ShufIns into single-input dependency groups. ---
2708 // Partition the list of shuffling instructions into instruction groups,
2709 // where each group has to be moved as a whole (i.e. a group is a chain of
2710 // dependent instructions). A group produces a single live output register,
2711 // which is meant to be the input of the loop phi node (although this is
2712 // not checked here yet). It also uses a single register as its input,
2713 // which is some value produced in the loop body. After moving the group
2714 // to the beginning of the loop, that input register would need to be
2715 // the loop-carried register (through a phi node) instead of the (currently
2716 // loop-carried) output register.
2717 typedef std::vector<InstrGroup> InstrGroupList;
2718 InstrGroupList Groups;
2720 for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) {
// Seed a group with SI; entries consumed into a group are nulled out.
2721 MachineInstr *SI = ShufIns[i];
2726 G.Ins.push_back(SI);
2727 G.Out.Reg = getDefReg(SI);
2729 HBS::getInstrUses(*SI, Inputs);
2731 for (unsigned j = i+1; j < n; ++j) {
2732 MachineInstr *MI = ShufIns[j];
2736 HBS::getInstrDefs(*MI, Defs);
2737 // If this instruction does not define any pending inputs, skip it.
2738 if (!Defs.intersects(Inputs))
2740 // Otherwise, add it to the current group and remove the inputs that
2741 // are defined by MI.
2742 G.Ins.push_back(MI);
2743 Inputs.remove(Defs);
2744 // Then add all registers used by MI.
2745 HBS::getInstrUses(*MI, Inputs);
2746 ShufIns[j] = nullptr;
2749 // Only add a group if it requires at most one register.
2750 if (Inputs.count() > 1)
// The group's output must feed a loop-carried phi value.
2752 auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
2753 return G.Out.Reg == P.LR.Reg;
2755 if (llvm::find_if(Phis, LoopInpEq) == Phis.end())
2758 G.Inp.Reg = Inputs.find_first();
2759 Groups.push_back(G);
// Debug dump of the computed groups.
2763 for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
2764 InstrGroup &G = Groups[i];
2765 dbgs() << "Group[" << i << "] inp: "
2766 << PrintReg(G.Inp.Reg, HRI, G.Inp.Sub)
2767 << " out: " << PrintReg(G.Out.Reg, HRI, G.Out.Sub) << "\n";
2768 for (unsigned j = 0, m = G.Ins.size(); j < m; ++j)
2769 dbgs() << " " << *G.Ins[j];
// --- Step 4: verify each group and move it. ---
2773 for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
2774 InstrGroup &G = Groups[i];
2775 if (!isShuffleOf(G.Out.Reg, G.Inp.Reg))
2777 auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
2778 return G.Out.Reg == P.LR.Reg;
2780 auto F = llvm::find_if(Phis, LoopInpEq);
2781 if (F == Phis.end())
// If the preheader value is not the same shuffle, it may still be usable
// when it is the constant 0 (a transfer-immediate of 0), since shuffling
// zero bits yields zero.
2784 if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PrehR)) {
2785 const MachineInstr *DefPrehR = MRI->getVRegDef(F->PR.Reg);
2786 unsigned Opc = DefPrehR->getOpcode();
2787 if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi)
2789 if (!DefPrehR->getOperand(1).isImm())
2791 if (DefPrehR->getOperand(1).getImm() != 0)
// Materialize a zero of the right register class in the preheader if the
// classes differ (32-bit tfrsi vs 64-bit tfrpi).
2793 const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg);
2794 if (RC != MRI->getRegClass(F->PR.Reg)) {
2795 PrehR = MRI->createVirtualRegister(RC);
2796 unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi
2797 : Hexagon::A2_tfrpi;
2798 auto T = C.PB->getFirstTerminator();
2799 DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc();
2800 BuildMI(*C.PB, T, DL, HII->get(TfrI), PrehR)
2806 // isSameShuffle could match with PrehR being of a wider class than
2807 // G.Inp.Reg, for example if G shuffles the low 32 bits of its input,
2808 // it would match for the input being a 32-bit register, and PrehR
2809 // being a 64-bit register (where the low 32 bits match). This could
2810 // be handled, but for now skip these cases.
2811 if (MRI->getRegClass(PrehR) != MRI->getRegClass(G.Inp.Reg))
2813 moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PrehR);
// Entry point of the loop-rescheduling pass: set up target info and the bit
// tracker, find candidate single-block loops (a block with exactly two
// predecessors and two successors, one of each being the block itself),
// then attempt processLoop on each candidate.
2820 bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
2821 if (skipFunction(*MF.getFunction()))
2824 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2825 HII = HST.getInstrInfo();
2826 HRI = HST.getRegisterInfo();
2827 MRI = &MF.getRegInfo();
2828 const HexagonEvaluator HE(*HRI, *MRI, *HII, MF);
2829 BitTracker BT(HE, MF);
2830 DEBUG(BT.trace(true));
2834 std::vector<LoopCand> Cand;
2836 for (auto &B : MF) {
// Candidate shape: 2 predecessors (preheader + back edge) and
// 2 successors (self + exit).
2837 if (B.pred_size() != 2 || B.succ_size() != 2)
2839 MachineBasicBlock *PB = nullptr;
2840 bool IsLoop = false;
// Identify the preheader among the predecessors (the non-self one);
// the self-predecessor check is on lines elided here.
2841 for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) {
2850 MachineBasicBlock *EB = nullptr;
2851 for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) {
2854 // Set EB to the epilog block, if it has only 1 predecessor (i.e. the
2855 // edge from B to EB is non-critical).
2856 if ((*SI)->pred_size() == 1)
2861 Cand.push_back(LoopCand(&B, PB, EB));
2864 bool Changed = false;
2865 for (auto &C : Cand)
2866 Changed |= processLoop(C);
2871 //===----------------------------------------------------------------------===//
2872 // Public Constructor Functions
2873 //===----------------------------------------------------------------------===//
// Factory for the loop-rescheduling pass (declared in namespace llvm above).
2875 FunctionPass *llvm::createHexagonLoopRescheduling() {
2876 return new HexagonLoopRescheduling();
2879 FunctionPass *llvm::createHexagonBitSimplify() {
2880 return new HexagonBitSimplify();