1 //===--- HexagonBitSimplify.cpp -------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "hexbit"
12 #include "HexagonBitTracker.h"
13 #include "HexagonTargetMachine.h"
14 #include "llvm/CodeGen/MachineDominators.h"
15 #include "llvm/CodeGen/MachineFunctionPass.h"
16 #include "llvm/CodeGen/MachineInstrBuilder.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/CodeGen/Passes.h"
19 #include "llvm/Support/Debug.h"
20 #include "llvm/Support/raw_ostream.h"
21 #include "llvm/Target/TargetInstrInfo.h"
22 #include "llvm/Target/TargetMachine.h"
// Forward declarations of the pass registration/creation entry points.
// NOTE(review): presumably declared inside namespace llvm — the enclosing
// namespace braces are not visible in this excerpt; confirm in the full file.
27 void initializeHexagonBitSimplifyPass(PassRegistry& Registry);
28 FunctionPass *createHexagonBitSimplify();
32 // Set of virtual registers, based on BitVector.
// Each virtual register maps to a bit index via v2x()/x2v() below. The set
// auto-grows (see ensure()). NOTE(review): several interior lines of this
// struct are elided in this excerpt (embedded numbering is non-contiguous);
// the code below is kept byte-identical.
33 struct RegisterSet : private BitVector {
34 RegisterSet() : BitVector() {}
35 explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
36 RegisterSet(const RegisterSet &RS) : BitVector(RS) {}
38 using BitVector::clear;
39 using BitVector::count;
// Iteration over members: find_first/find_next work on bit indices and
// presumably convert back to virtual registers via x2v() in the elided
// return paths — TODO confirm.
41 unsigned find_first() const {
42 int First = BitVector::find_first();
48 unsigned find_next(unsigned Prev) const {
49 int Next = BitVector::find_next(v2x(Prev));
// Insert/remove a single virtual register by its bit index.
55 RegisterSet &insert(unsigned R) {
56 unsigned Idx = v2x(R);
58 return static_cast<RegisterSet&>(BitVector::set(Idx));
60 RegisterSet &remove(unsigned R) {
61 unsigned Idx = v2x(R);
64 return static_cast<RegisterSet&>(BitVector::reset(Idx));
// Set union and set difference with another RegisterSet.
67 RegisterSet &insert(const RegisterSet &Rs) {
68 return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
70 RegisterSet &remove(const RegisterSet &Rs) {
71 return static_cast<RegisterSet&>(BitVector::reset(Rs));
// Element access by virtual register number.
74 reference operator[](unsigned R) {
75 unsigned Idx = v2x(R);
77 return BitVector::operator[](Idx);
79 bool operator[](unsigned R) const {
80 unsigned Idx = v2x(R);
82 return BitVector::operator[](Idx);
84 bool has(unsigned R) const {
85 unsigned Idx = v2x(R);
88 return BitVector::test(Idx);
// empty(): true when no bit is set.
92 return !BitVector::any();
94 bool includes(const RegisterSet &Rs) const {
95 // A.BitVector::test(B) <=> A-B != {}
96 return !Rs.BitVector::test(*this);
98 bool intersects(const RegisterSet &Rs) const {
99 return BitVector::anyCommon(Rs);
// Grow the underlying bit vector so index Idx is addressable (minimum
// capacity 32 bits).
103 void ensure(unsigned Idx) {
105 resize(std::max(Idx+1, 32U));
// Mapping between virtual register numbers and dense bit indices.
107 static inline unsigned v2x(unsigned v) {
108 return TargetRegisterInfo::virtReg2Index(v);
110 static inline unsigned x2v(unsigned x) {
111 return TargetRegisterInfo::index2VirtReg(x);
// Debug-print helper that pairs a RegisterSet with a TargetRegisterInfo so
// registers print with their names. NOTE(review): the struct header line
// (presumably "struct PrintRegSet {") is elided in this excerpt.
117 PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
119 friend raw_ostream &operator<< (raw_ostream &OS,
120 const PrintRegSet &P);
122 const RegisterSet &RS;
123 const TargetRegisterInfo *TRI;
// Declared separately so LLVM_ATTRIBUTE_UNUSED can suppress "unused
// function" warnings in builds that do not print sets.
126 raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
127 LLVM_ATTRIBUTE_UNUSED;
128 raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
// Loop terminates when find_first/find_next return 0 — relies on the
// elided RegisterSet iteration code returning 0 at the end; TODO confirm.
130 for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
131 OS << ' ' << PrintReg(R, P.TRI);
139 class Transformation;
// The main pass object. It drives a dominator-tree-ordered traversal
// (visitBlock) and hosts the static bit-level utility helpers shared by
// the individual transformations defined later in this file.
141 class HexagonBitSimplify : public MachineFunctionPass {
144 HexagonBitSimplify() : MachineFunctionPass(ID), MDT(0) {
145 initializeHexagonBitSimplifyPass(*PassRegistry::getPassRegistry());
147 virtual const char *getPassName() const {
148 return "Hexagon bit simplification";
150 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
// This pass both requires and preserves the machine dominator tree.
151 AU.addRequired<MachineDominatorTree>();
152 AU.addPreserved<MachineDominatorTree>();
153 MachineFunctionPass::getAnalysisUsage(AU);
155 virtual bool runOnMachineFunction(MachineFunction &MF);
// Query helpers: collect defs/uses, compare/inspect BitTracker cells.
157 static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
158 static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
159 static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
160 const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
161 static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
163 static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
164 uint16_t W, uint64_t &U);
// Rewriting helpers: replace uses of OldR(:OldSR) with NewR(:NewSR).
165 static bool replaceReg(unsigned OldR, unsigned NewR,
166 MachineRegisterInfo &MRI);
167 static bool getSubregMask(const BitTracker::RegisterRef &RR,
168 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
169 static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
170 unsigned NewSR, MachineRegisterInfo &MRI);
171 static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
172 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
173 static bool parseRegSequence(const MachineInstr &I,
174 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH);
// Used-bit analysis (definitions later in this file).
176 static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
178 static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
179 uint16_t Begin, const HexagonInstrInfo &HII);
181 static const TargetRegisterClass *getFinalVRegClass(
182 const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
183 static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
184 const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);
187 MachineDominatorTree *MDT;
189 bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
// Pass identification token and a short alias used throughout this file.
192 char HexagonBitSimplify::ID = 0;
193 typedef HexagonBitSimplify HBS;
196 // The purpose of this class is to provide a common facility to traverse
197 // the function top-down or bottom-up via the dominator tree, and keep
198 // track of the available registers.
199 class Transformation {
// TD selects the traversal direction (stored in the TopDown member, whose
// declaration is elided in this excerpt — TODO confirm); visitBlock
// presumably uses it to choose processing before or after children.
202 Transformation(bool TD) : TopDown(TD) {}
203 virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
204 virtual ~Transformation() {}
// Standard LLVM pass-registration boilerplate; declares the dependency on
// MachineDominatorTree required by getAnalysisUsage above.
208 INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexbit",
209 "Hexagon bit simplification", false, false)
210 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
211 INITIALIZE_PASS_END(HexagonBitSimplify, "hexbit",
212 "Hexagon bit simplification", false, false)
// Recursively visit block B and its dominator-tree children, threading the
// set of available registers (AVs) down the tree. T.processBlock runs
// either before or after the children; the selecting condition (presumably
// Transformation::TopDown) is elided in this excerpt — TODO confirm.
215 bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
217 MachineDomTreeNode *N = MDT->getNode(&B);
218 typedef GraphTraits<MachineDomTreeNode*> GTN;
219 bool Changed = false;
222 Changed = T.processBlock(B, AVs);
// Registers defined in this block become available in dominated blocks.
226 getInstrDefs(I, Defs);
227 RegisterSet NewAVs = AVs;
230 for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I) {
231 MachineBasicBlock *SB = (*I)->getBlock();
232 Changed |= visitBlock(*SB, T, NewAVs);
235 Changed |= T.processBlock(B, AVs);
241 // Utility functions:
// Collect into Defs every virtual register defined by MI. Physical
// registers are skipped.
243 void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
245 for (auto &Op : MI.operands()) {
246 if (!Op.isReg() || !Op.isDef())
248 unsigned R = Op.getReg();
249 if (!TargetRegisterInfo::isVirtualRegister(R))
// Collect into Uses every virtual register used (read) by MI. Physical
// registers are skipped. Mirrors getInstrDefs above.
255 void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
257 for (auto &Op : MI.operands()) {
258 if (!Op.isReg() || !Op.isUse())
260 unsigned R = Op.getReg();
261 if (!TargetRegisterInfo::isVirtualRegister(R))
267 // Check if all the bits in range [B, E) in both cells are equal.
// Compares W bits of RC1 starting at B1 against W bits of RC2 starting at
// B2. Unknown ("bottom") bits can never be proven equal.
268 bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1,
269 uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2,
271 for (uint16_t i = 0; i < W; ++i) {
272 // If RC1[i] is "bottom", it cannot be proven equal to RC2[i].
273 if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0)
// Same check for RC2: Ref with Reg == 0 encodes an unknown bit.
276 if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0)
278 if (RC1[B1+i] != RC2[B2+i])
// Check whether all W bits of RC starting at position B are known zeros.
284 bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC,
285 uint16_t B, uint16_t W) {
286 assert(B < RC.width() && B+W <= RC.width());
287 for (uint16_t i = B; i < B+W; ++i)
// Extract bits [B, B+W) of RC into the integer U. Returns false if any
// bit in the range is not a known constant 0/1.
294 bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC,
295 uint16_t B, uint16_t W, uint64_t &U) {
296 assert(B < RC.width() && B+W <= RC.width());
// Walk from the most-significant bit of the range downward.
298 for (uint16_t i = B+W; i > B; --i) {
299 const BitTracker::BitValue &BV = RC[i-1];
// Rewrite every use of OldR to refer to NewR instead. Both registers must
// be virtual; returns false otherwise.
311 bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR,
312 MachineRegisterInfo &MRI) {
313 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
314 !TargetRegisterInfo::isVirtualRegister(NewR))
316 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
// NextI is captured before the operand is rewritten: changing the register
// of a use invalidates the current use-list iterator.
318 for (auto I = Begin; I != End; I = NextI) {
319 NextI = std::next(I);
// Rewrite every use of OldR to use the subregister NewR:NewSR instead.
// Both registers must be virtual; returns false otherwise.
326 bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR,
327 unsigned NewSR, MachineRegisterInfo &MRI) {
328 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
329 !TargetRegisterInfo::isVirtualRegister(NewR))
331 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
// Capture the next iterator before mutating the current use (same
// iterator-invalidation pattern as replaceReg above).
333 for (auto I = Begin; I != End; I = NextI) {
334 NextI = std::next(I);
// Rewrite uses of the subregister OldR:OldSR to use NewR:NewSR instead.
// Uses of OldR with a different subregister index are left untouched.
342 bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR,
343 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) {
344 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
345 !TargetRegisterInfo::isVirtualRegister(NewR))
347 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
349 for (auto I = Begin; I != End; I = NextI) {
350 NextI = std::next(I);
// Only rewrite uses that reference exactly the OldSR subregister.
351 if (I->getSubReg() != OldSR)
360 // For a register ref (pair Reg:Sub), set Begin to the position of the LSB
361 // of Sub in Reg, and set Width to the size of Sub in bits. Return true,
362 // if this succeeded, otherwise return false.
363 bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
364 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) {
365 const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg);
// 32-bit integer register: the whole register is the range.
366 if (RC == &Hexagon::IntRegsRegClass) {
// 64-bit register pair: the low subreg starts at bit 0, the high at 32.
372 if (RC == &Hexagon::DoubleRegsRegClass) {
378 assert(RR.Sub == Hexagon::subreg_loreg || RR.Sub == Hexagon::subreg_hireg);
380 Begin = (RR.Sub == Hexagon::subreg_loreg ? 0 : 32);
387 // For a REG_SEQUENCE, set SL to the low subregister and SH to the high
// subregister operand, normalizing the two possible operand orders.
// Operands: (0)=def, (1)=src1, (2)=subidx1, (3)=src2, (4)=subidx2.
389 bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I,
390 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH) {
391 assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE)
392 unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm();
393 assert(Sub1 != Sub2);
394 if (Sub1 == Hexagon::subreg_loreg && Sub2 == Hexagon::subreg_hireg) {
395 SL = I.getOperand(1);
396 SH = I.getOperand(3);
399 if (Sub1 == Hexagon::subreg_hireg && Sub2 == Hexagon::subreg_loreg) {
400 SH = I.getOperand(1);
401 SL = I.getOperand(3);
408 // All stores (except 64-bit stores) take a 32-bit register as the source
409 // of the value to be stored. If the instruction stores into a location
410 // that is shorter than 32 bits, some bits of the source register are not
411 // used. For each store instruction, calculate the set of used bits in
412 // the source register, and set appropriate bits in Bits. Return true if
413 // the bits are calculated, false otherwise.
414 bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
416 using namespace Hexagon;
// Byte stores: only bits [Begin, Begin+8) of the source are used.
420 case S2_storerb_io: // memb(Rs32+#s11:0)=Rt32
421 case S2_storerbnew_io: // memb(Rs32+#s11:0)=Nt8.new
422 case S2_pstorerbt_io: // if (Pv4) memb(Rs32+#u6:0)=Rt32
423 case S2_pstorerbf_io: // if (!Pv4) memb(Rs32+#u6:0)=Rt32
424 case S4_pstorerbtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
425 case S4_pstorerbfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
426 case S2_pstorerbnewt_io: // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
427 case S2_pstorerbnewf_io: // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
428 case S4_pstorerbnewtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
429 case S4_pstorerbnewfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
430 case S2_storerb_pi: // memb(Rx32++#s4:0)=Rt32
431 case S2_storerbnew_pi: // memb(Rx32++#s4:0)=Nt8.new
432 case S2_pstorerbt_pi: // if (Pv4) memb(Rx32++#s4:0)=Rt32
433 case S2_pstorerbf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Rt32
434 case S2_pstorerbtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
435 case S2_pstorerbfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
436 case S2_pstorerbnewt_pi: // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
437 case S2_pstorerbnewf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
438 case S2_pstorerbnewtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
439 case S2_pstorerbnewfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
440 case S4_storerb_ap: // memb(Re32=#U6)=Rt32
441 case S4_storerbnew_ap: // memb(Re32=#U6)=Nt8.new
442 case S2_storerb_pr: // memb(Rx32++Mu2)=Rt32
443 case S2_storerbnew_pr: // memb(Rx32++Mu2)=Nt8.new
444 case S4_storerb_ur: // memb(Ru32<<#u2+#U6)=Rt32
445 case S4_storerbnew_ur: // memb(Ru32<<#u2+#U6)=Nt8.new
446 case S2_storerb_pbr: // memb(Rx32++Mu2:brev)=Rt32
447 case S2_storerbnew_pbr: // memb(Rx32++Mu2:brev)=Nt8.new
448 case S2_storerb_pci: // memb(Rx32++#s4:0:circ(Mu2))=Rt32
449 case S2_storerbnew_pci: // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
450 case S2_storerb_pcr: // memb(Rx32++I:circ(Mu2))=Rt32
451 case S2_storerbnew_pcr: // memb(Rx32++I:circ(Mu2))=Nt8.new
452 case S4_storerb_rr: // memb(Rs32+Ru32<<#u2)=Rt32
453 case S4_storerbnew_rr: // memb(Rs32+Ru32<<#u2)=Nt8.new
454 case S4_pstorerbt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
455 case S4_pstorerbf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
456 case S4_pstorerbtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
457 case S4_pstorerbfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
458 case S4_pstorerbnewt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
459 case S4_pstorerbnewf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
460 case S4_pstorerbnewtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
461 case S4_pstorerbnewfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
462 case S2_storerbgp: // memb(gp+#u16:0)=Rt32
463 case S2_storerbnewgp: // memb(gp+#u16:0)=Nt8.new
464 case S4_pstorerbt_abs: // if (Pv4) memb(#u6)=Rt32
465 case S4_pstorerbf_abs: // if (!Pv4) memb(#u6)=Rt32
466 case S4_pstorerbtnew_abs: // if (Pv4.new) memb(#u6)=Rt32
467 case S4_pstorerbfnew_abs: // if (!Pv4.new) memb(#u6)=Rt32
468 case S4_pstorerbnewt_abs: // if (Pv4) memb(#u6)=Nt8.new
469 case S4_pstorerbnewf_abs: // if (!Pv4) memb(#u6)=Nt8.new
470 case S4_pstorerbnewtnew_abs: // if (Pv4.new) memb(#u6)=Nt8.new
471 case S4_pstorerbnewfnew_abs: // if (!Pv4.new) memb(#u6)=Nt8.new
472 Bits.set(Begin, Begin+8);
// Halfword stores: bits [Begin, Begin+16) of the source are used.
476 case S2_storerh_io: // memh(Rs32+#s11:1)=Rt32
477 case S2_storerhnew_io: // memh(Rs32+#s11:1)=Nt8.new
478 case S2_pstorerht_io: // if (Pv4) memh(Rs32+#u6:1)=Rt32
479 case S2_pstorerhf_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt32
480 case S4_pstorerhtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
481 case S4_pstorerhfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
482 case S2_pstorerhnewt_io: // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
483 case S2_pstorerhnewf_io: // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
484 case S4_pstorerhnewtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
485 case S4_pstorerhnewfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
486 case S2_storerh_pi: // memh(Rx32++#s4:1)=Rt32
487 case S2_storerhnew_pi: // memh(Rx32++#s4:1)=Nt8.new
488 case S2_pstorerht_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt32
489 case S2_pstorerhf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt32
490 case S2_pstorerhtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
491 case S2_pstorerhfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
492 case S2_pstorerhnewt_pi: // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
493 case S2_pstorerhnewf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
494 case S2_pstorerhnewtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
495 case S2_pstorerhnewfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
496 case S4_storerh_ap: // memh(Re32=#U6)=Rt32
497 case S4_storerhnew_ap: // memh(Re32=#U6)=Nt8.new
498 case S2_storerh_pr: // memh(Rx32++Mu2)=Rt32
499 case S2_storerhnew_pr: // memh(Rx32++Mu2)=Nt8.new
500 case S4_storerh_ur: // memh(Ru32<<#u2+#U6)=Rt32
501 case S4_storerhnew_ur: // memh(Ru32<<#u2+#U6)=Nt8.new
502 case S2_storerh_pbr: // memh(Rx32++Mu2:brev)=Rt32
503 case S2_storerhnew_pbr: // memh(Rx32++Mu2:brev)=Nt8.new
504 case S2_storerh_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt32
505 case S2_storerhnew_pci: // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
506 case S2_storerh_pcr: // memh(Rx32++I:circ(Mu2))=Rt32
507 case S2_storerhnew_pcr: // memh(Rx32++I:circ(Mu2))=Nt8.new
508 case S4_storerh_rr: // memh(Rs32+Ru32<<#u2)=Rt32
509 case S4_pstorerht_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
510 case S4_pstorerhf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
511 case S4_pstorerhtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
512 case S4_pstorerhfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
513 case S4_storerhnew_rr: // memh(Rs32+Ru32<<#u2)=Nt8.new
514 case S4_pstorerhnewt_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
515 case S4_pstorerhnewf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
516 case S4_pstorerhnewtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
517 case S4_pstorerhnewfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
518 case S2_storerhgp: // memh(gp+#u16:1)=Rt32
519 case S2_storerhnewgp: // memh(gp+#u16:1)=Nt8.new
520 case S4_pstorerht_abs: // if (Pv4) memh(#u6)=Rt32
521 case S4_pstorerhf_abs: // if (!Pv4) memh(#u6)=Rt32
522 case S4_pstorerhtnew_abs: // if (Pv4.new) memh(#u6)=Rt32
523 case S4_pstorerhfnew_abs: // if (!Pv4.new) memh(#u6)=Rt32
524 case S4_pstorerhnewt_abs: // if (Pv4) memh(#u6)=Nt8.new
525 case S4_pstorerhnewf_abs: // if (!Pv4) memh(#u6)=Nt8.new
526 case S4_pstorerhnewtnew_abs: // if (Pv4.new) memh(#u6)=Nt8.new
527 case S4_pstorerhnewfnew_abs: // if (!Pv4.new) memh(#u6)=Nt8.new
528 Bits.set(Begin, Begin+16);
// High-half (Rt.H32) stores: bits [Begin+16, Begin+32) are used.
532 case S2_storerf_io: // memh(Rs32+#s11:1)=Rt.H32
533 case S2_pstorerft_io: // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
534 case S2_pstorerff_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
535 case S4_pstorerftnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
536 case S4_pstorerffnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
537 case S2_storerf_pi: // memh(Rx32++#s4:1)=Rt.H32
538 case S2_pstorerft_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
539 case S2_pstorerff_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
540 case S2_pstorerftnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
541 case S2_pstorerffnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
542 case S4_storerf_ap: // memh(Re32=#U6)=Rt.H32
543 case S2_storerf_pr: // memh(Rx32++Mu2)=Rt.H32
544 case S4_storerf_ur: // memh(Ru32<<#u2+#U6)=Rt.H32
545 case S2_storerf_pbr: // memh(Rx32++Mu2:brev)=Rt.H32
546 case S2_storerf_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
547 case S2_storerf_pcr: // memh(Rx32++I:circ(Mu2))=Rt.H32
548 case S4_storerf_rr: // memh(Rs32+Ru32<<#u2)=Rt.H32
549 case S4_pstorerft_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
550 case S4_pstorerff_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
551 case S4_pstorerftnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
552 case S4_pstorerffnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
553 case S2_storerfgp: // memh(gp+#u16:1)=Rt.H32
554 case S4_pstorerft_abs: // if (Pv4) memh(#u6)=Rt.H32
555 case S4_pstorerff_abs: // if (!Pv4) memh(#u6)=Rt.H32
556 case S4_pstorerftnew_abs: // if (Pv4.new) memh(#u6)=Rt.H32
557 case S4_pstorerffnew_abs: // if (!Pv4.new) memh(#u6)=Rt.H32
558 Bits.set(Begin+16, Begin+32);
566 // For an instruction with opcode Opc, calculate the set of bits that it
567 // uses in a register in operand OpN. This only calculates the set of used
568 // bits for cases where it does not depend on any operands (as is the case
569 // in shifts, for example). For concrete instructions from a program, the
570 // operand may be a subregister of a larger register, while Bits would
571 // correspond to the larger register in its entirety. Because of that,
572 // the parameter Begin can be used to indicate which bit of Bits should be
573 // considered the LSB of of the operand.
574 bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
575 BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
576 using namespace Hexagon;
578 const MCInstrDesc &D = HII.get(Opc);
// The last operand of a store is the stored value; delegate to the
// store-specific table above.
580 if (OpN == D.getNumOperands()-1)
581 return getUsedBitsInStore(Opc, Bits, Begin);
586 // One register source. Used bits: R1[0-7].
593 Bits.set(Begin, Begin+8);
598 // One register source. Used bits: R1[0-15].
606 Bits.set(Begin, Begin+16);
611 // One register source. Used bits: R1[16-31].
614 Bits.set(Begin+16, Begin+32);
619 // Two register sources. Used bits: R1[0-7], R2[0-7].
624 Bits.set(Begin, Begin+8);
629 // Two register sources. Used bits: R1[0-15], R2[0-15].
634 case A2_addh_h16_sat_ll:
636 case A2_addh_l16_sat_ll:
639 case A2_subh_h16_sat_ll:
641 case A2_subh_l16_sat_ll:
642 case M2_mpy_acc_ll_s0:
643 case M2_mpy_acc_ll_s1:
644 case M2_mpy_acc_sat_ll_s0:
645 case M2_mpy_acc_sat_ll_s1:
648 case M2_mpy_nac_ll_s0:
649 case M2_mpy_nac_ll_s1:
650 case M2_mpy_nac_sat_ll_s0:
651 case M2_mpy_nac_sat_ll_s1:
652 case M2_mpy_rnd_ll_s0:
653 case M2_mpy_rnd_ll_s1:
654 case M2_mpy_sat_ll_s0:
655 case M2_mpy_sat_ll_s1:
656 case M2_mpy_sat_rnd_ll_s0:
657 case M2_mpy_sat_rnd_ll_s1:
658 case M2_mpyd_acc_ll_s0:
659 case M2_mpyd_acc_ll_s1:
662 case M2_mpyd_nac_ll_s0:
663 case M2_mpyd_nac_ll_s1:
664 case M2_mpyd_rnd_ll_s0:
665 case M2_mpyd_rnd_ll_s1:
666 case M2_mpyu_acc_ll_s0:
667 case M2_mpyu_acc_ll_s1:
670 case M2_mpyu_nac_ll_s0:
671 case M2_mpyu_nac_ll_s1:
672 case M2_mpyud_acc_ll_s0:
673 case M2_mpyud_acc_ll_s1:
676 case M2_mpyud_nac_ll_s0:
677 case M2_mpyud_nac_ll_s1:
// "ll" forms: both source operands (1 and 2) use only their low halves.
678 if (OpN == 1 || OpN == 2) {
679 Bits.set(Begin, Begin+16);
684 // Two register sources. Used bits: R1[0-15], R2[16-31].
686 case A2_addh_h16_sat_lh:
689 case A2_subh_h16_sat_lh:
690 case M2_mpy_acc_lh_s0:
691 case M2_mpy_acc_lh_s1:
692 case M2_mpy_acc_sat_lh_s0:
693 case M2_mpy_acc_sat_lh_s1:
696 case M2_mpy_nac_lh_s0:
697 case M2_mpy_nac_lh_s1:
698 case M2_mpy_nac_sat_lh_s0:
699 case M2_mpy_nac_sat_lh_s1:
700 case M2_mpy_rnd_lh_s0:
701 case M2_mpy_rnd_lh_s1:
702 case M2_mpy_sat_lh_s0:
703 case M2_mpy_sat_lh_s1:
704 case M2_mpy_sat_rnd_lh_s0:
705 case M2_mpy_sat_rnd_lh_s1:
706 case M2_mpyd_acc_lh_s0:
707 case M2_mpyd_acc_lh_s1:
710 case M2_mpyd_nac_lh_s0:
711 case M2_mpyd_nac_lh_s1:
712 case M2_mpyd_rnd_lh_s0:
713 case M2_mpyd_rnd_lh_s1:
714 case M2_mpyu_acc_lh_s0:
715 case M2_mpyu_acc_lh_s1:
718 case M2_mpyu_nac_lh_s0:
719 case M2_mpyu_nac_lh_s1:
720 case M2_mpyud_acc_lh_s0:
721 case M2_mpyud_acc_lh_s1:
724 case M2_mpyud_nac_lh_s0:
725 case M2_mpyud_nac_lh_s1:
726 // These four are actually LH.
728 case A2_addh_l16_sat_hl:
730 case A2_subh_l16_sat_hl:
// First operand uses its low half, second its high half.
732 Bits.set(Begin, Begin+16);
736 Bits.set(Begin+16, Begin+32);
741 // Two register sources, used bits: R1[16-31], R2[0-15].
743 case A2_addh_h16_sat_hl:
746 case A2_subh_h16_sat_hl:
747 case M2_mpy_acc_hl_s0:
748 case M2_mpy_acc_hl_s1:
749 case M2_mpy_acc_sat_hl_s0:
750 case M2_mpy_acc_sat_hl_s1:
753 case M2_mpy_nac_hl_s0:
754 case M2_mpy_nac_hl_s1:
755 case M2_mpy_nac_sat_hl_s0:
756 case M2_mpy_nac_sat_hl_s1:
757 case M2_mpy_rnd_hl_s0:
758 case M2_mpy_rnd_hl_s1:
759 case M2_mpy_sat_hl_s0:
760 case M2_mpy_sat_hl_s1:
761 case M2_mpy_sat_rnd_hl_s0:
762 case M2_mpy_sat_rnd_hl_s1:
763 case M2_mpyd_acc_hl_s0:
764 case M2_mpyd_acc_hl_s1:
767 case M2_mpyd_nac_hl_s0:
768 case M2_mpyd_nac_hl_s1:
769 case M2_mpyd_rnd_hl_s0:
770 case M2_mpyd_rnd_hl_s1:
771 case M2_mpyu_acc_hl_s0:
772 case M2_mpyu_acc_hl_s1:
775 case M2_mpyu_nac_hl_s0:
776 case M2_mpyu_nac_hl_s1:
777 case M2_mpyud_acc_hl_s0:
778 case M2_mpyud_acc_hl_s1:
781 case M2_mpyud_nac_hl_s0:
782 case M2_mpyud_nac_hl_s1:
// First operand uses its high half, second its low half.
784 Bits.set(Begin+16, Begin+32);
788 Bits.set(Begin, Begin+16);
793 // Two register sources, used bits: R1[16-31], R2[16-31].
795 case A2_addh_h16_sat_hh:
798 case A2_subh_h16_sat_hh:
799 case M2_mpy_acc_hh_s0:
800 case M2_mpy_acc_hh_s1:
801 case M2_mpy_acc_sat_hh_s0:
802 case M2_mpy_acc_sat_hh_s1:
805 case M2_mpy_nac_hh_s0:
806 case M2_mpy_nac_hh_s1:
807 case M2_mpy_nac_sat_hh_s0:
808 case M2_mpy_nac_sat_hh_s1:
809 case M2_mpy_rnd_hh_s0:
810 case M2_mpy_rnd_hh_s1:
811 case M2_mpy_sat_hh_s0:
812 case M2_mpy_sat_hh_s1:
813 case M2_mpy_sat_rnd_hh_s0:
814 case M2_mpy_sat_rnd_hh_s1:
815 case M2_mpyd_acc_hh_s0:
816 case M2_mpyd_acc_hh_s1:
819 case M2_mpyd_nac_hh_s0:
820 case M2_mpyd_nac_hh_s1:
821 case M2_mpyd_rnd_hh_s0:
822 case M2_mpyd_rnd_hh_s1:
823 case M2_mpyu_acc_hh_s0:
824 case M2_mpyu_acc_hh_s1:
827 case M2_mpyu_nac_hh_s0:
828 case M2_mpyu_nac_hh_s1:
829 case M2_mpyud_acc_hh_s0:
830 case M2_mpyud_acc_hh_s1:
833 case M2_mpyud_nac_hh_s0:
834 case M2_mpyud_nac_hh_s1:
// "hh" forms: both source operands use only their high halves.
835 if (OpN == 1 || OpN == 2) {
836 Bits.set(Begin+16, Begin+32);
846 // Calculate the register class that matches Reg:Sub. For example, if
847 // vreg1 is a double register, then vreg1:subreg_hireg would match "int"
// register class. Appears to return nullptr on failure (see the nullptr
// check in isTransparentCopy below) — elided default path; TODO confirm.
849 const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
850 const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
851 if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
853 auto *RC = MRI.getRegClass(RR.Reg);
// Sanity check: the only subregister indices expected here are lo/hi.
857 auto VerifySR = [] (unsigned Sub) -> void {
858 assert(Sub == Hexagon::subreg_hireg || Sub == Hexagon::subreg_loreg);
// Map each 64-bit (pair) class to its 32-bit element class.
861 switch (RC->getID()) {
862 case Hexagon::DoubleRegsRegClassID:
864 return &Hexagon::IntRegsRegClass;
865 case Hexagon::VecDblRegsRegClassID:
867 return &Hexagon::VectorRegsRegClass;
868 case Hexagon::VecDblRegs128BRegClassID:
870 return &Hexagon::VectorRegs128BRegClass;
876 // Check if RD could be replaced with RS at any possible use of RD.
877 // For example a predicate register cannot be replaced with a integer
878 // register, but a 64-bit register with a subregister can be replaced
879 // with a 32-bit register.
880 bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
881 const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
882 if (!TargetRegisterInfo::isVirtualRegister(RD.Reg) ||
883 !TargetRegisterInfo::isVirtualRegister(RS.Reg))
885 // Return false if one (or both) classes are nullptr.
886 auto *DRC = getFinalVRegClass(RD, MRI);
// The copy is transparent iff both refs resolve to the same final class.
890 return DRC == getFinalVRegClass(RS, MRI);
895 // Dead code elimination
// Removes instructions whose (virtual) results are never meaningfully
// used, walking the dominator tree bottom-up.
898 class DeadCodeElimination {
900 DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
901 : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
902 MDT(mdt), MRI(mf.getRegInfo()) {}
// Entry point (enclosing run() signature elided in this excerpt):
// starts the recursion at the dominator-tree root.
905 return runOnNode(MDT.getRootNode());
909 bool isDead(unsigned R) const;
910 bool runOnNode(MachineDomTreeNode *N);
913 const HexagonInstrInfo &HII;
914 MachineDominatorTree &MDT;
915 MachineRegisterInfo &MRI;
// Check whether register R is dead, i.e. no use of it keeps it alive.
// Part of the per-use logic is elided in this excerpt.
920 bool DeadCodeElimination::isDead(unsigned R) const {
921 for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
922 MachineInstr *UseI = I->getParent();
// Debug values do not keep a register alive.
923 if (UseI->isDebugValue())
926 assert(!UseI->getOperand(0).getSubReg());
927 unsigned DR = UseI->getOperand(0).getReg();
// Eliminate dead instructions in N's block, after first processing all
// dominated blocks (bottom-up over the dominator tree).
937 bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
938 bool Changed = false;
939 typedef GraphTraits<MachineDomTreeNode*> GTN;
940 for (auto I = GTN::child_begin(N), E = GTN::child_end(N); I != E; ++I)
941 Changed |= runOnNode(*I);
943 MachineBasicBlock *B = N->getBlock();
// Snapshot the instructions (in reverse order) so erasing instructions
// does not disturb the iteration.
944 std::vector<MachineInstr*> Instrs;
945 for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
946 Instrs.push_back(&*I);
948 for (auto MI : Instrs) {
949 unsigned Opc = MI->getOpcode();
950 // Do not touch lifetime markers. This is why the target-independent DCE
952 if (Opc == TargetOpcode::LIFETIME_START ||
953 Opc == TargetOpcode::LIFETIME_END)
// Never remove inline asm, even if its outputs appear unused.
956 if (MI->isInlineAsm())
958 // Delete PHIs if possible.
959 if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
963 SmallVector<unsigned,2> Regs;
964 for (auto &Op : MI->operands()) {
965 if (!Op.isReg() || !Op.isDef())
967 unsigned R = Op.getReg();
// Any live (or physical) def makes the whole instruction non-removable.
968 if (!TargetRegisterInfo::isVirtualRegister(R) || !isDead(R)) {
// Keep DBG_VALUEs consistent for the registers we deleted.
978 for (unsigned i = 0, n = Regs.size(); i != n; ++i)
979 MRI.markUsesInDebugValueAsUndef(Regs[i]);
988 // Eliminate redundant instructions
990 // This transformation will identify instructions where the output register
991 // is the same as one of its input registers. This only works on instructions
992 // that define a single register (unlike post-increment loads, for example).
993 // The equality check is actually more detailed: the code calculates which
994 // bits of the output are used, and only compares these bits with the input
996 // If the output matches an input, the instruction is replaced with COPY.
997 // The copies will be removed by another transformation.
999 class RedundantInstrElimination : public Transformation {
1001 RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
1002 MachineRegisterInfo &mri)
// Top-down traversal (Transformation(true)).
1003 : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
1004 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// Helpers: detect lossy shifts, compute used-bit sets, and compare the
// used bits of a def against a candidate source register.
1006 bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
1007 unsigned &LostB, unsigned &LostE);
1008 bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
1009 unsigned &LostB, unsigned &LostE);
1010 bool computeUsedBits(unsigned Reg, BitVector &Bits);
1011 bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
1013 bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);
1015 const HexagonInstrInfo &HII;
1016 MachineRegisterInfo &MRI;
1022 // Check if the instruction is a lossy shift left, where the input being
1023 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
1024 // of bit indices that are lost.
1025 bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI,
1026 unsigned OpN, unsigned &LostB, unsigned &LostE) {
1027 using namespace Hexagon;
1028 unsigned Opc = MI.getOpcode();
// ImN: index of the immediate shift-amount operand; RegN: index of the
// shifted register operand; Width: operand width in bits (32 or 64).
1029 unsigned ImN, RegN, Width;
// 64-bit (pair) shift-left variants.
1036 case S2_asl_i_p_acc:
1037 case S2_asl_i_p_and:
1038 case S2_asl_i_p_nac:
1040 case S2_asl_i_p_xacc:
// 32-bit shift-left variants.
1050 case S2_addasl_rrri:
1051 case S4_andi_asl_ri:
1053 case S4_addi_asl_ri:
1054 case S4_subi_asl_ri:
1055 case S2_asl_i_r_acc:
1056 case S2_asl_i_r_and:
1057 case S2_asl_i_r_nac:
1059 case S2_asl_i_r_sat:
1060 case S2_asl_i_r_xacc:
// Shifting left by S loses the top S bits of the operand; the lost range
// is computed from S in the elided tail of this function.
1072 assert(MI.getOperand(ImN).isImm());
1073 unsigned S = MI.getOperand(ImN).getImm();
1082 // Check if the instruction is a lossy shift right, where the input being
1083 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
1084 // of bit indices that are lost.
1085 bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
1086 unsigned OpN, unsigned &LostB, unsigned &LostE) {
1087 using namespace Hexagon;
1088 unsigned Opc = MI.getOpcode();
// 64-bit (pair) arithmetic/logical shift-right variants.
1096 case S2_asr_i_p_acc:
1097 case S2_asr_i_p_and:
1098 case S2_asr_i_p_nac:
1100 case S2_lsr_i_p_acc:
1101 case S2_lsr_i_p_and:
1102 case S2_lsr_i_p_nac:
1104 case S2_lsr_i_p_xacc:
// 32-bit shift-right variants.
1113 case S4_andi_lsr_ri:
1115 case S4_addi_lsr_ri:
1116 case S4_subi_lsr_ri:
1117 case S2_asr_i_r_acc:
1118 case S2_asr_i_r_and:
1119 case S2_asr_i_r_nac:
1121 case S2_lsr_i_r_acc:
1122 case S2_lsr_i_r_and:
1123 case S2_lsr_i_r_nac:
1125 case S2_lsr_i_r_xacc:
// Shifting right by S loses the bottom S bits of the operand; the lost
// range is computed from S in the elided tail of this function.
1137 assert(MI.getOperand(ImN).isImm());
1138 unsigned S = MI.getOperand(ImN).getImm();
1145 // Calculate the bit vector that corresponds to the used bits of register Reg.
1146 // The vector Bits has the same size, as the size of Reg in bits. If the cal-
1147 // culation fails (i.e. the used bits are unknown), it returns false. Other-
1148 // wise, it returns true and sets the corresponding bits in Bits.
1149 bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
// Worklist traversal: follow PHI/COPY chains so that the used bits of a
// copy's destination count as used bits of Reg.
1150 BitVector Used(Bits.size());
1151 RegisterSet Visited;
1152 std::vector<unsigned> Pending;
1153 Pending.push_back(Reg);
1155 for (unsigned i = 0; i < Pending.size(); ++i) {
1156 unsigned R = Pending[i];
1160 for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
1161 BitTracker::RegisterRef UR = *I;
// B/W locate the used subregister within the full register.
1163 if (!HBS::getSubregMask(UR, B, W, MRI))
1165 MachineInstr &UseI = *I->getParent();
1166 if (UseI.isPHI() || UseI.isCopy()) {
1167 unsigned DefR = UseI.getOperand(0).getReg();
1168 if (!TargetRegisterInfo::isVirtualRegister(DefR))
1170 Pending.push_back(DefR);
// Non-copy use: ask the per-instruction analysis below.
1172 if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
1182 // Calculate the bits used by instruction MI in a register in operand OpN.
1183 // Return true/false if the calculation succeeds/fails. If is succeeds, set
1184 // used bits in Bits. This function does not reset any bits in Bits, so
1185 // subsequent calls over different instructions will result in the union
1186 // of the used bits in all these instructions.
1187 // The register in question may be used with a sub-register, whereas Bits
1188 // holds the bits for the entire register. To keep track of that, the
1189 // argument Begin indicates where in Bits is the lowest-significant bit
1190 // of the register used in operand OpN. For example, in instruction:
1191 // vreg1 = S2_lsr_i_r vreg2:subreg_hireg, 10
1192 // the operand 1 is a 32-bit register, which happens to be a subregister
1193 // of the 64-bit register vreg2, and that subregister starts at position 32.
1194 // In this case Begin=32, since Bits[32] would be the lowest-significant bit
1195 // of vreg2:subreg_hireg.
1196 bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
1197 unsigned OpN, BitVector &Bits, uint16_t Begin) {
1198 unsigned Opc = MI.getOpcode();
1199 BitVector T(Bits.size());
1200 bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
1201 // Even if we don't have bits yet, we could still provide some information
1202 // if the instruction is a lossy shift: the lost bits will be marked as
// not-used (reset below) regardless of what the generic table said.
1205 if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
1206 assert(MI.getOperand(OpN).isReg());
1207 BitTracker::RegisterRef RR = MI.getOperand(OpN);
1208 const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
1209 uint16_t Width = RC->getSize()*8;
// Assume all bits used, then clear the ones the lossy shift discards.
1212 T.set(Begin, Begin+Width);
1213 assert(LB <= LE && LB < Width && LE <= Width);
1214 T.reset(Begin+LB, Begin+LE);
1223 // Calculates the used bits in RD ("defined register"), and checks if these
1224 // bits in RS ("used register") and RD are identical.
// Returns false (on elided early-exit lines) if either register's subregister
// mask cannot be computed, or the used bits of RD are unknown, or any used
// bit differs between the two register cells.
1225 bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD,
1226 BitTracker::RegisterRef RS) {
1227 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
1228 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
// DB/DW and SB/SW: bit offset and width of RD's and RS's (sub)registers.
1231 if (!HBS::getSubregMask(RD, DB, DW, MRI))
1234 if (!HBS::getSubregMask(RS, SB, SW, MRI))
1239 BitVector Used(DC.width());
1240 if (!computeUsedBits(RD.Reg, Used))
// Compare only the bits that are actually consumed by users of RD.
1243 for (unsigned i = 0; i != DW; ++i)
1244 if (Used[i+DB] && DC[DB+i] != SC[SB+i])
// Scan block B for instructions whose result duplicates bits already present
// in one of their source operands; replace such instructions with a COPY of
// that source. NOTE(review): several continue/early-exit lines are elided in
// this excerpt.
1250 bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
1251 const RegisterSet&) {
1252 bool Changed = false;
// NextI is kept so the loop survives replacement of the current instruction.
1254 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) {
1255 NextI = std::next(I);
1256 MachineInstr *MI = &*I;
// COPYs are already minimal; side-effecting/inline-asm instructions must not
// be rewritten.
1258 if (MI->getOpcode() == TargetOpcode::COPY)
1260 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
1262 unsigned NumD = MI->getDesc().getNumDefs();
1266 BitTracker::RegisterRef RD = MI->getOperand(0);
1267 if (!BT.has(RD.Reg))
1269 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
// New instructions may not be inserted among PHIs.
1270 auto At = MI->isPHI() ? B.getFirstNonPHI()
1271 : MachineBasicBlock::iterator(MI);
1273 // Find a source operand that is equal to the result.
1274 for (auto &Op : MI->uses()) {
1277 BitTracker::RegisterRef RS = Op;
1278 if (!BT.has(RS.Reg))
1280 if (!HBS::isTransparentCopy(RD, RS, MRI))
1284 if (!HBS::getSubregMask(RS, BN, BW, MRI))
1287 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
// Either the used bits must match, or the full cells must be equal.
1288 if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW))
1291 // If found, replace the instruction with a COPY.
1292 const DebugLoc &DL = MI->getDebugLoc();
1293 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
1294 unsigned NewR = MRI.createVirtualRegister(FRC);
1295 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
1296 .addReg(RS.Reg, 0, RS.Sub);
1297 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
// Keep the bit tracker consistent: the new register holds RS's cell.
1298 BT.put(BitTracker::RegisterRef(NewR), SC);
1311 // Recognize instructions that produce constant values known at compile-time.
1312 // Replace them with register definitions that load these constants directly.
1314 class ConstGeneration : public Transformation {
1316 ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
1317 MachineRegisterInfo &mri)
1318 : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
1319 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// True if MI is already a transfer-immediate (no point in rewriting it).
1321 bool isTfrConst(const MachineInstr &MI) const;
// True if register R holds a compile-time constant; the value is returned in V.
1322 bool isConst(unsigned R, int64_t &V) const;
// Emit a transfer-immediate of C into a new register of class RC; returns the
// new virtual register (0 on elided failure paths, presumably — verify).
1323 unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
1324 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);
1326 const HexagonInstrInfo &HII;
1327 MachineRegisterInfo &MRI;
// Check if the bit-tracker cell for R consists entirely of known 0/1 bits,
// i.e. R is a compile-time constant. The constant value is assembled (on
// elided lines) into C. Bits are scanned from the most significant down.
1332 bool ConstGeneration::isConst(unsigned R, int64_t &C) const {
1335 const BitTracker::RegisterCell &RC = BT.lookup(R);
1337 for (unsigned i = RC.width(); i > 0; --i) {
1338 const BitTracker::BitValue &V = RC[i-1];
// Return true if MI is one of the known transfer-immediate/constant-load
// instructions (these already materialize constants directly, so ConstGen
// should leave them alone).
1349 bool ConstGeneration::isTfrConst(const MachineInstr &MI) const {
1350 unsigned Opc = MI.getOpcode();
1352 case Hexagon::A2_combineii:
1353 case Hexagon::A4_combineii:
1354 case Hexagon::A2_tfrsi:
1355 case Hexagon::A2_tfrpi:
1356 case Hexagon::TFR_PdTrue:
1357 case Hexagon::TFR_PdFalse:
1358 case Hexagon::CONST32_Int_Real:
1359 case Hexagon::CONST64_Int_Real:
1366 // Generate a transfer-immediate instruction that is appropriate for the
1367 // register class and the actual value being transferred.
// Three register classes are handled: 32-bit int, 64-bit double (with three
// encodings depending on the value's range), and predicate registers (only
// all-zeros / all-ones patterns). Returns the newly created register.
1368 unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
1369 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
1370 unsigned Reg = MRI.createVirtualRegister(RC);
1371 if (RC == &Hexagon::IntRegsRegClass) {
1372 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
1373 .addImm(int32_t(C));
1377 if (RC == &Hexagon::DoubleRegsRegClass) {
// Small values fit a single tfrpi (immediate form for double registers).
1379 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
1384 unsigned Lo = Lo_32(C), Hi = Hi_32(C);
// If either half fits in a signed 8-bit immediate, use a combine form to
// avoid constant extenders; A2 takes the small value in the low slot.
1385 if (isInt<8>(Lo) || isInt<8>(Hi)) {
1386 unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
1387 : Hexagon::A4_combineii;
1388 BuildMI(B, At, DL, HII.get(Opc), Reg)
1389 .addImm(int32_t(Hi))
1390 .addImm(int32_t(Lo));
// Fallback: full 64-bit constant load.
1394 BuildMI(B, At, DL, HII.get(Hexagon::CONST64_Int_Real), Reg)
1399 if (RC == &Hexagon::PredRegsRegClass) {
// Predicates only support all-false / all-true immediates.
1402 Opc = Hexagon::TFR_PdFalse;
1403 else if ((C & 0xFF) == 0xFF)
1404 Opc = Hexagon::TFR_PdTrue;
1407 BuildMI(B, At, DL, HII.get(Opc), Reg);
// For each single-def instruction in B whose result is a known constant,
// materialize the constant with a transfer-immediate and redirect all uses
// of the original register to it.
1415 bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
1416 bool Changed = false;
1419 for (auto I = B.begin(), E = B.end(); I != E; ++I) {
1423 HBS::getInstrDefs(*I, Defs);
// Only handle instructions defining exactly one (virtual) register.
1424 if (Defs.count() != 1)
1426 unsigned DR = Defs.find_first();
1427 if (!TargetRegisterInfo::isVirtualRegister(DR))
1430 if (isConst(DR, C)) {
1431 DebugLoc DL = I->getDebugLoc();
// Never insert among PHIs.
1432 auto At = I->isPHI() ? B.getFirstNonPHI() : I;
1433 unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
1435 HBS::replaceReg(DR, ImmReg, MRI);
// Propagate the known cell to the new register.
1436 BT.put(ImmReg, BT.lookup(DR));
1448 // Identify pairs of available registers which hold identical values.
1449 // In such cases, only one of them needs to be calculated, the other one
1450 // will be defined as a copy of the first.
1454 // Eliminate register copies RD = RS, by replacing the uses of RD with
// RS (comment continues on an elided line).
1457 class CopyGeneration : public Transformation {
1459 CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
1460 MachineRegisterInfo &mri)
1461 : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
1462 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// Search AVs for a register (possibly a Reg:Sub pair) bit-identical to Inp.
1464 bool findMatch(const BitTracker::RegisterRef &Inp,
1465 BitTracker::RegisterRef &Out, const RegisterSet &AVs);
1467 const HexagonInstrInfo &HII;
1468 MachineRegisterInfo &MRI;
// Forward-propagate the sources of copy-like instructions (COPY,
// REG_SEQUENCE, tfr/combine forms) into their users.
// NOTE(review): the hri constructor parameter is not stored in any member
// visible here — it appears unused.
1472 class CopyPropagation : public Transformation {
1474 CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
1475 : Transformation(false), MRI(mri) {}
1476 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// True for opcodes that behave as (possibly partial) register copies.
1477 static bool isCopyReg(unsigned Opc);
1479 bool propagateRegCopy(MachineInstr &MI);
1481 MachineRegisterInfo &MRI;
1487 /// Check if there is a register in AVs that is identical to Inp. If so,
1488 /// set Out to the found register. The output may be a pair Reg:Sub.
1489 bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp,
1490 BitTracker::RegisterRef &Out, const RegisterSet &AVs) {
1491 if (!BT.has(Inp.Reg))
1493 const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg);
// B/W: bit offset and width of Inp within its full register.
1495 if (!HBS::getSubregMask(Inp, B, W, MRI))
// RegisterSet::find_first/find_next return 0 when exhausted (register 0 is
// never a valid virtual register), hence the "R" loop condition.
1498 for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) {
1499 if (!BT.has(R) || !HBS::isTransparentCopy(R, Inp, MRI))
1501 const BitTracker::RegisterCell &RC = BT.lookup(R);
1502 unsigned RW = RC.width();
// Same-width candidates must also share the register class to be usable as
// a direct (subregister-free) match.
1504 if (MRI.getRegClass(Inp.Reg) != MRI.getRegClass(R))
1506 if (!HBS::isEqual(InpRC, B, RC, 0, W))
1512 // Check if there is a super-register, whose part (with a subregister)
1513 // is equal to the input.
1514 // Only do double registers for now.
1517 if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass)
// Match Inp against the low or high half of the double register.
1520 if (HBS::isEqual(InpRC, B, RC, 0, W))
1521 Out.Sub = Hexagon::subreg_loreg;
1522 else if (HBS::isEqual(InpRC, B, RC, W, W))
1523 Out.Sub = Hexagon::subreg_hireg;
// For each instruction in B, if a defined register duplicates a value that is
// already available (AVB), emit a COPY from the existing register. AVB starts
// as the registers available on entry (AVs) and accumulates this block's defs.
1533 bool CopyGeneration::processBlock(MachineBasicBlock &B,
1534 const RegisterSet &AVs) {
1535 RegisterSet AVB(AVs);
1536 bool Changed = false;
// Defs of the just-processed instruction become available for the next one
// (inserted in the loop's increment expression).
1539 for (auto I = B.begin(), E = B.end(), NextI = I; I != E;
1540 ++I, AVB.insert(Defs)) {
1541 NextI = std::next(I);
1543 HBS::getInstrDefs(*I, Defs);
// Copy-like instructions are CopyPropagation's job; skip them here.
1545 unsigned Opc = I->getOpcode();
1546 if (CopyPropagation::isCopyReg(Opc))
// find_first/find_next return 0 when the set is exhausted.
1549 for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) {
1550 BitTracker::RegisterRef MR;
1551 if (!findMatch(R, MR, AVB))
1553 DebugLoc DL = I->getDebugLoc();
1554 auto *FRC = HBS::getFinalVRegClass(MR, MRI);
1555 unsigned NewR = MRI.createVirtualRegister(FRC);
// Never insert among PHIs.
1556 auto At = I->isPHI() ? B.getFirstNonPHI() : I;
1557 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
1558 .addReg(MR.Reg, 0, MR.Sub);
1559 BT.put(BitTracker::RegisterRef(NewR), BT.get(MR));
// Opcodes treated as full or partial register copies for the purposes of
// copy propagation (the combine forms copy into one half of a double reg).
1567 bool CopyPropagation::isCopyReg(unsigned Opc) {
1569 case TargetOpcode::COPY:
1570 case TargetOpcode::REG_SEQUENCE:
1571 case Hexagon::A2_tfr:
1572 case Hexagon::A2_tfrp:
1573 case Hexagon::A2_combinew:
1574 case Hexagon::A4_combineir:
1575 case Hexagon::A4_combineri:
// Replace uses of the destination of the copy-like instruction MI with its
// source(s). For full copies the whole register is replaced; for
// REG_SEQUENCE/combine forms each half of the double destination is replaced
// with the corresponding source (sub)register.
1584 bool CopyPropagation::propagateRegCopy(MachineInstr &MI) {
1585 bool Changed = false;
1586 unsigned Opc = MI.getOpcode();
1587 BitTracker::RegisterRef RD = MI.getOperand(0);
// Copy-like defs never write a subregister of the destination.
1588 assert(MI.getOperand(0).getSubReg() == 0);
1591 case TargetOpcode::COPY:
1592 case Hexagon::A2_tfr:
1593 case Hexagon::A2_tfrp: {
1594 BitTracker::RegisterRef RS = MI.getOperand(1);
1595 if (!HBS::isTransparentCopy(RD, RS, MRI))
// Elided branch distinguishes RS with/without a subregister.
1598 Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI);
1600 Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI);
1603 case TargetOpcode::REG_SEQUENCE: {
1604 BitTracker::RegisterRef SL, SH;
1605 if (HBS::parseRegSequence(MI, SL, SH)) {
1606 Changed = HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_loreg,
1607 SL.Reg, SL.Sub, MRI);
1608 Changed |= HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_hireg,
1609 SH.Reg, SH.Sub, MRI);
1613 case Hexagon::A2_combinew: {
// combinew: operand 1 is the high word, operand 2 the low word.
1614 BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2);
1615 Changed = HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_loreg,
1616 RL.Reg, RL.Sub, MRI);
1617 Changed |= HBS::replaceSubWithSub(RD.Reg, Hexagon::subreg_hireg,
1618 RH.Reg, RH.Sub, MRI);
1621 case Hexagon::A4_combineir:
1622 case Hexagon::A4_combineri: {
// combineir has the register in operand 2 (immediate in the high half);
// combineri has it in operand 1 (immediate in the low half).
1623 unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1;
1624 unsigned Sub = (Opc == Hexagon::A4_combineir) ? Hexagon::subreg_loreg
1625 : Hexagon::subreg_hireg;
1626 BitTracker::RegisterRef RS = MI.getOperand(SrcX);
1627 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI);
// Propagate all copy-like instructions in B. The instruction list is
// snapshotted in reverse order first, so that propagation (which can erase
// or rewrite instructions) does not invalidate the iteration.
1635 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) {
1636 std::vector<MachineInstr*> Instrs;
1637 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I)
1638 Instrs.push_back(&*I);
1640 bool Changed = false;
1641 for (auto I : Instrs) {
1642 unsigned Opc = I->getOpcode();
1643 if (!CopyPropagation::isCopyReg(Opc))
1645 Changed |= propagateRegCopy(*I);
1653 // Bit simplification
1655 // Recognize patterns that can be simplified and replace them with the
// equivalent, simpler instructions.
1657 // This is by no means complete
1659 class BitSimplification : public Transformation {
1661 BitSimplification(BitTracker &bt, const HexagonInstrInfo &hii,
1662 MachineRegisterInfo &mri)
1663 : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
1664 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// A register reference augmented with which 16-bit half of it is meant.
1666 struct RegHalf : public BitTracker::RegisterRef {
1667 bool Low; // Low/High halfword.
// Pattern matchers over bit-tracker cells (see definitions below).
1670 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC,
1671 unsigned B, RegHalf &RH);
1673 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC,
1674 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt);
1675 unsigned getCombineOpcode(bool HLow, bool LLow);
// Individual rewrite rules; each returns true if it changed the code.
1677 bool genStoreUpperHalf(MachineInstr *MI);
1678 bool genStoreImmediate(MachineInstr *MI);
1679 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD,
1680 const BitTracker::RegisterCell &RC);
1681 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD,
1682 const BitTracker::RegisterCell &RC);
1683 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD,
1684 const BitTracker::RegisterCell &RC);
1685 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD,
1686 const BitTracker::RegisterCell &RC);
1687 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD,
1688 const BitTracker::RegisterCell &RC);
1690 const HexagonInstrInfo &HII;
1691 MachineRegisterInfo &MRI;
1697 // Check if the bits [B..B+16) in register cell RC form a valid halfword,
1698 // i.e. [0..16), [16..32), etc. of some register. If so, return true and
1699 // set the information about the found register in RH.
1700 bool BitSimplification::matchHalf(unsigned SelfR,
1701 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) {
1702 // XXX This could be searching in the set of available registers, in case
1703 // the match is not exact.
1705 // Match 16-bit chunks, where the RC[B..B+15] references exactly one
1706 // register and all the bits B..B+15 match between RC and the register.
1707 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... },
1708 // and RC = { [0]:0 [1-15]:v1[1-15]... }.
// Skip over leading constant bits to find the first register reference.
1711 while (I < B+16 && RC[I].num())
1716 unsigned Reg = RC[I].RefI.Reg;
1717 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B.
// Normalize Pos back to the bit corresponding to position B.
1720 unsigned Pos = P - (I-B);
1722 if (Reg == 0 || Reg == SelfR) // Don't match "self".
1724 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1729 const BitTracker::RegisterCell &SC = BT.lookup(Reg);
1730 if (Pos+16 > SC.width())
// Verify every bit of the 16-bit chunk: register references must point at
// consecutive bits of Reg; constant bits must equal the referenced cell.
1733 for (unsigned i = 0; i < 16; ++i) {
1734 const BitTracker::BitValue &RV = RC[i+B];
1735 if (RV.Type == BitTracker::BitValue::Ref) {
1736 if (RV.RefI.Reg != Reg)
1738 if (RV.RefI.Pos != i+Pos)
1742 if (RC[i+B] != SC[i+Pos])
// Elided switch on Pos selects which halfword of Reg was matched and sets
// the subregister (lo/hi half of a double register) plus RH.Low.
1749 Sub = Hexagon::subreg_loreg;
1753 Sub = Hexagon::subreg_loreg;
1757 Sub = Hexagon::subreg_hireg;
1761 Sub = Hexagon::subreg_hireg;
1771 // If the subregister is not valid with the register, set it to 0.
1772 if (!HBS::getFinalVRegClass(RH, MRI))
1779 // Check if RC matches the pattern of a S2_packhl. If so, return true and
1780 // set the inputs Rs and Rt.
// S2_packhl Rs, Rt produces (Rs.h, Rt.h, Rs.l, Rt.l) — i.e. the four 16-bit
// chunks of the 64-bit result interleave the halves of the two sources.
1781 bool BitSimplification::matchPackhl(unsigned SelfR,
1782 const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs,
1783 BitTracker::RegisterRef &Rt) {
1784 RegHalf L1, H1, L2, H2;
// Match all four 16-bit chunks of the 64-bit cell.
1786 if (!matchHalf(SelfR, RC, 0, L2) || !matchHalf(SelfR, RC, 16, L1))
1788 if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1))
1791 // Rs = H1.L1, Rt = H2.L2
// Each pair must come from the same register, with the high chunk being the
// register's high half and the low chunk its low half.
1792 if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low)
1794 if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low)
1803 unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) {
1804 return HLow ? LLow ? Hexagon::A2_combine_ll
1805 : Hexagon::A2_combine_lh
1806 : LLow ? Hexagon::A2_combine_hl
1807 : Hexagon::A2_combine_hh;
1811 // If MI stores the upper halfword of a register (potentially obtained via
1812 // shifts or extracts), replace it with a storerf instruction. This could
1813 // cause the "extraction" code to become dead.
1814 bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
1815 unsigned Opc = MI->getOpcode();
// Only halfword stores are candidates.
1816 if (Opc != Hexagon::S2_storerh_io)
1819 MachineOperand &ValOp = MI->getOperand(2);
1820 BitTracker::RegisterRef RS = ValOp;
1821 if (!BT.has(RS.Reg))
1823 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
// The stored value's low 16 bits must be exactly some register's halfword;
// an elided check requires it to be the *high* halfword (H.Low == false).
1825 if (!matchHalf(0, RC, 0, H))
// Rewrite in place: storerf stores the high half of its source register.
1829 MI->setDesc(HII.get(Hexagon::S2_storerf_io));
1830 ValOp.setReg(H.Reg);
1831 ValOp.setSubReg(H.Sub);
1836 // If MI stores a value known at compile-time, and the value is within a range
1837 // that avoids using constant-extenders, replace it with a store-immediate.
1838 bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
1839 unsigned Opc = MI->getOpcode();
// Elided switch sets Align (log2 of the access size) per store width.
1842 case Hexagon::S2_storeri_io:
1844 case Hexagon::S2_storerh_io:
1846 case Hexagon::S2_storerb_io:
1852 // Avoid stores to frame-indices (due to an unknown offset).
1853 if (!MI->getOperand(0).isReg())
1855 MachineOperand &OffOp = MI->getOperand(1);
1859 int64_t Off = OffOp.getImm();
1860 // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x).
// Require the offset to fit in 6 bits after scaling and be properly aligned.
1861 if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1)))
1864 BitTracker::RegisterRef RS = MI->getOperand(2);
1865 if (!BT.has(RS.Reg))
1867 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
// The stored value must be a fully-known constant U.
1869 if (!HBS::getConst(RC, 0, RC.width(), U))
1872 // Only consider 8-bit values to avoid constant-extenders.
// Elided per-opcode code derives the signed value V from U.
1875 case Hexagon::S2_storerb_io:
1878 case Hexagon::S2_storerh_io:
1881 case Hexagon::S2_storeri_io:
// Rewrite in place: drop the register operand, switch to the store-immediate
// opcode of matching width, then append the immediate.
1888 MI->RemoveOperand(2);
1890 case Hexagon::S2_storerb_io:
1891 MI->setDesc(HII.get(Hexagon::S4_storeirb_io));
1893 case Hexagon::S2_storerh_io:
1894 MI->setDesc(HII.get(Hexagon::S4_storeirh_io));
1896 case Hexagon::S2_storeri_io:
1897 MI->setDesc(HII.get(Hexagon::S4_storeiri_io));
1900 MI->addOperand(MachineOperand::CreateImm(V));
1905 // If MI is equivalent to S2_packhl, generate the S2_packhl. MI could be the
1906 // last instruction in a sequence that results in something equivalent to
1907 // the pack-halfwords. The intent is to cause the entire sequence to become
// dead.
1909 bool BitSimplification::genPackhl(MachineInstr *MI,
1910 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
1911 unsigned Opc = MI->getOpcode();
// Already a packhl — nothing to do.
1912 if (Opc == Hexagon::S2_packhl)
1914 BitTracker::RegisterRef Rs, Rt;
1915 if (!matchPackhl(RD.Reg, RC, Rs, Rt))
1918 MachineBasicBlock &B = *MI->getParent();
1919 unsigned NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
1920 DebugLoc DL = MI->getDebugLoc();
// Never insert among PHIs.
1921 auto At = MI->isPHI() ? B.getFirstNonPHI()
1922 : MachineBasicBlock::iterator(MI);
1923 BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR)
1924 .addReg(Rs.Reg, 0, Rs.Sub)
1925 .addReg(Rt.Reg, 0, Rt.Sub);
// Redirect uses of RD to the new register and keep the bit tracker updated.
1926 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
1927 BT.put(BitTracker::RegisterRef(NewR), RC);
1932 // If MI produces halfword of the input in the low half of the output,
1933 // replace it with zero-extend or extractu.
1934 bool BitSimplification::genExtractHalf(MachineInstr *MI,
1935 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
1937 // Check for halfword in low 16 bits, zeros elsewhere.
1938 if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16))
1941 unsigned Opc = MI->getOpcode();
1942 MachineBasicBlock &B = *MI->getParent();
1943 DebugLoc DL = MI->getDebugLoc();
1945 // Prefer zxth, since zxth can go in any slot, while extractu only in
// a restricted subset of slots (comment continues on an elided line).
1948 auto At = MI->isPHI() ? B.getFirstNonPHI()
1949 : MachineBasicBlock::iterator(MI);
// Low halfword: zero-extend. Skip if MI already is the target opcode.
1950 if (L.Low && Opc != Hexagon::A2_zxth) {
1951 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1952 BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR)
1953 .addReg(L.Reg, 0, L.Sub);
// High halfword: logical shift right by 16 (immediate added on elided line).
// NOTE(review): this branch inserts at MI rather than At — confirm against
// the full file whether that is intentional for PHIs.
1954 } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) {
1955 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1956 BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR)
1957 .addReg(L.Reg, 0, L.Sub)
1962 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
1963 BT.put(BitTracker::RegisterRef(NewR), RC);
1968 // If MI is equivalent to a combine(.L/.H, .L/.H) replace it with the
// appropriate A2_combine_* instruction.
1970 bool BitSimplification::genCombineHalf(MachineInstr *MI,
1971 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
1973 // Check for combine h/l
// L = low 16 bits of the result, H = high 16 bits.
1974 if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H))
1976 // Do nothing if this is just a reg copy.
1977 if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low)
1980 unsigned Opc = MI->getOpcode();
1981 unsigned COpc = getCombineOpcode(H.Low, L.Low);
1985 MachineBasicBlock &B = *MI->getParent();
1986 DebugLoc DL = MI->getDebugLoc();
1987 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
// Never insert among PHIs.
1988 auto At = MI->isPHI() ? B.getFirstNonPHI()
1989 : MachineBasicBlock::iterator(MI);
1990 BuildMI(B, At, DL, HII.get(COpc), NewR)
1991 .addReg(H.Reg, 0, H.Sub)
1992 .addReg(L.Reg, 0, L.Sub);
1993 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
1994 BT.put(BitTracker::RegisterRef(NewR), RC);
1999 // If MI resets high bits of a register and keeps the lower ones, replace it
2000 // with zero-extend byte/half, and-immediate, or extractu, as appropriate.
2001 bool BitSimplification::genExtractLow(MachineInstr *MI,
2002 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2003 unsigned Opc = MI->getOpcode();
// These already are the canonical "extract low bits" forms — skip them.
2005 case Hexagon::A2_zxtb:
2006 case Hexagon::A2_zxth:
2007 case Hexagon::S2_extractu:
// An and-immediate with a 0..2^10 mask would not be improved either
// (elided lines act on this value).
2010 if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) {
2011 int32_t Imm = MI->getOperand(2).getImm();
2016 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
// W = index of the highest non-zero bit + 1; all bits above W are known 0.
2018 unsigned W = RC.width();
2019 while (W > 0 && RC[W-1].is(0))
// Bail out if nothing is known zero, or everything is zero.
2021 if (W == 0 || W == RC.width())
// Pick the cheapest opcode able to keep exactly the low W bits.
2023 unsigned NewOpc = (W == 8) ? Hexagon::A2_zxtb
2024 : (W == 16) ? Hexagon::A2_zxth
2025 : (W < 10) ? Hexagon::A2_andir
2026 : Hexagon::S2_extractu;
2027 MachineBasicBlock &B = *MI->getParent();
2028 DebugLoc DL = MI->getDebugLoc();
// Find a source operand whose low W bits equal the result's low W bits.
2030 for (auto &Op : MI->uses()) {
2033 BitTracker::RegisterRef RS = Op;
2034 if (!BT.has(RS.Reg))
2036 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
2038 if (!HBS::getSubregMask(RS, BN, BW, MRI))
2040 if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W))
2043 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
// Never insert among PHIs.
2044 auto At = MI->isPHI() ? B.getFirstNonPHI()
2045 : MachineBasicBlock::iterator(MI);
2046 auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR)
2047 .addReg(RS.Reg, 0, RS.Sub);
// andir needs the mask; extractu needs width and offset; zxtb/zxth need
// no extra operands.
2048 if (NewOpc == Hexagon::A2_andir)
2049 MIB.addImm((1 << W) - 1);
2050 else if (NewOpc == Hexagon::S2_extractu)
2051 MIB.addImm(W).addImm(0);
2052 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2053 BT.put(BitTracker::RegisterRef(NewR), RC);
2060 // Check for tstbit simplification opportunity, where the bit being checked
2061 // can be tracked back to another register. For example:
2062 // vreg2 = S2_lsr_i_r vreg1, 5
2063 // vreg3 = S2_tstbit_i vreg2, 0
// can be rewritten as:
2065 // vreg3 = S2_tstbit_i vreg1, 5
2066 bool BitSimplification::simplifyTstbit(MachineInstr *MI,
2067 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2068 unsigned Opc = MI->getOpcode();
2069 if (Opc != Hexagon::S2_tstbit_i)
// BN = index of the tested bit, RS = the tested register.
2072 unsigned BN = MI->getOperand(2).getImm();
2073 BitTracker::RegisterRef RS = MI->getOperand(1);
2075 DebugLoc DL = MI->getDebugLoc();
2076 if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI))
2078 MachineBasicBlock &B = *MI->getParent();
// Never insert among PHIs.
2079 auto At = MI->isPHI() ? B.getFirstNonPHI()
2080 : MachineBasicBlock::iterator(MI);
2082 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
// V is the tracked value of the tested bit.
2083 const BitTracker::BitValue &V = SC[F+BN];
// Case 1: the bit is a copy of some bit of another register — test that
// register's bit directly.
2084 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) {
2085 const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg);
2086 // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is
2087 // a double register, need to use a subregister and adjust bit
// position accordingly.
2089 unsigned P = UINT_MAX;
2090 BitTracker::RegisterRef RR(V.RefI.Reg, 0);
2091 if (TC == &Hexagon::DoubleRegsRegClass) {
2093 RR.Sub = Hexagon::subreg_loreg;
2096 RR.Sub = Hexagon::subreg_hireg;
2098 } else if (TC == &Hexagon::IntRegsRegClass) {
// P remains UINT_MAX for unsupported register classes — no rewrite then.
2101 if (P != UINT_MAX) {
2102 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
2103 BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR)
2104 .addReg(RR.Reg, 0, RR.Sub)
2106 HBS::replaceReg(RD.Reg, NewR, MRI);
// Case 2: the bit is a known constant — the test folds to true/false.
2110 } else if (V.is(0) || V.is(1)) {
2111 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
2112 unsigned NewOpc = V.is(0) ? Hexagon::TFR_PdFalse : Hexagon::TFR_PdTrue;
2113 BuildMI(B, At, DL, HII.get(NewOpc), NewR);
2114 HBS::replaceReg(RD.Reg, NewR, MRI);
// Dispatch the individual rewrite rules over every instruction in B,
// selecting the applicable rules by the register class of the single
// defined register.
2122 bool BitSimplification::processBlock(MachineBasicBlock &B,
2123 const RegisterSet &AVs) {
2124 bool Changed = false;
2125 RegisterSet AVB = AVs;
// Defs of each instruction become available to the following ones.
2128 for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) {
2129 MachineInstr *MI = &*I;
2131 HBS::getInstrDefs(*MI, Defs);
// Copies/reg-sequences are handled by the copy passes; skip.
2133 unsigned Opc = MI->getOpcode();
2134 if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE)
// Stores have their own rules (no defs to inspect).
2137 if (MI->mayStore()) {
2138 bool T = genStoreUpperHalf(MI);
2139 T = T || genStoreImmediate(MI);
2144 if (Defs.count() != 1)
2146 const MachineOperand &Op0 = MI->getOperand(0);
2147 if (!Op0.isReg() || !Op0.isDef())
2149 BitTracker::RegisterRef RD = Op0;
2150 if (!BT.has(RD.Reg))
2152 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
2153 const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg);
// 64-bit results: try the packhl pattern.
2155 if (FRC->getID() == Hexagon::DoubleRegsRegClassID) {
2156 bool T = genPackhl(MI, RD, RC);
// 32-bit results: extract/combine patterns, tried in order.
2161 if (FRC->getID() == Hexagon::IntRegsRegClassID) {
2162 bool T = genExtractHalf(MI, RD, RC);
2163 T = T || genCombineHalf(MI, RD, RC);
2164 T = T || genExtractLow(MI, RD, RC);
// Predicate results: tstbit simplification.
2169 if (FRC->getID() == Hexagon::PredRegsRegClassID) {
2170 bool T = simplifyTstbit(MI, RD, RC);
// Pass driver: runs dead-code elimination, computes bit-tracker information,
// then applies the transformations in sequence (const generation, redundant
// instruction elimination, copy generation, copy propagation, bit
// simplification), interleaved with further DCE runs.
2179 bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
2180 if (skipFunction(*MF.getFunction()))
2183 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2184 auto &HRI = *HST.getRegisterInfo();
2185 auto &HII = *HST.getInstrInfo();
2187 MDT = &getAnalysis<MachineDominatorTree>();
2188 MachineRegisterInfo &MRI = MF.getRegInfo();
// Clean up first so the bit tracker does not analyze dead code.
2191 Changed = DeadCodeElimination(MF, *MDT).run();
2193 const HexagonEvaluator HE(HRI, MRI, HII, MF);
2194 BitTracker BT(HE, MF);
2195 DEBUG(BT.trace(true));
// All transformations are driven from the entry block, walking the dominator
// tree (see visitBlock) with a per-pass set of available registers.
2198 MachineBasicBlock &Entry = MF.front();
2200 RegisterSet AIG; // Available registers for IG.
2201 ConstGeneration ImmG(BT, HII, MRI);
2202 Changed |= visitBlock(Entry, ImmG, AIG);
2204 RegisterSet ARE; // Available registers for RIE.
2205 RedundantInstrElimination RIE(BT, HII, MRI);
2206 Changed |= visitBlock(Entry, RIE, ARE);
2208 RegisterSet ACG; // Available registers for CG.
2209 CopyGeneration CopyG(BT, HII, MRI);
2210 Changed |= visitBlock(Entry, CopyG, ACG);
2212 RegisterSet ACP; // Available registers for CP.
2213 CopyPropagation CopyP(HRI, MRI);
2214 Changed |= visitBlock(Entry, CopyP, ACP);
// Order matters: DCE must run even if Changed is already true, hence
// "DCE.run() || Changed" rather than "Changed || DCE.run()".
2216 Changed = DeadCodeElimination(MF, *MDT).run() || Changed;
2219 RegisterSet ABS; // Available registers for BS.
2220 BitSimplification BitS(BT, HII, MRI);
2221 Changed |= visitBlock(Entry, BitS, ABS);
2223 Changed = DeadCodeElimination(MF, *MDT).run() || Changed;
2229 DeadCodeElimination(MF, *MDT).run();
2235 // Recognize loops where the code at the end of the loop matches the code
2236 // before the entry of the loop, and the matching code is such that it can
2237 // be simplified. This pass relies on the bit simplification above and only
2238 // prepares code in a way that can be handled by the bit simplification.
2240 // This is the motivating testcase (and explanation):
2243 // loop0(.LBB0_2, r1) // %for.body.preheader
2244 // r5:4 = memd(r0++#8)
2247 // r3 = lsr(r4, #16)
2248 // r7:6 = combine(r5, r5)
2251 // r3 = insert(r5, #16, #16)
2252 // r7:6 = vlsrw(r7:6, #16)
2257 // memh(r2+#6) = r6 # R6 is really R5.H
2262 // memh(r2+#2) = r3 # R3 is really R4.H
2265 // r5:4 = memd(r0++#8)
2267 // { # "Shuffling" code that sets up R3 and R6
2268 // r3 = lsr(r4, #16) # so that their halves can be stored in the
2269 // r7:6 = combine(r5, r5) # next iteration. This could be folded into
2270 // } # the stores if the code was at the beginning
2271 // { # of the loop iteration. Since the same code
2272 // r3 = insert(r5, #16, #16) # precedes the loop, it can actually be moved
2273 // r7:6 = vlsrw(r7:6, #16) # there.
2280 // loop0(.LBB0_2, r1)
2281 // r5:4 = memd(r0++#8)
2286 // memh(r2+#6) = r5.h
2291 // memh(r2+#2) = r4.h
2294 // r5:4 = memd(r0++#8)
2298 FunctionPass *createHexagonLoopRescheduling();
2299 void initializeHexagonLoopReschedulingPass(PassRegistry&);
// Pass that moves "bit-shuffling" code from the end of a loop body to its
// beginning (and the preheader), enabling the bit simplification above to
// fold it away. See the long comment preceding this class.
2303 class HexagonLoopRescheduling : public MachineFunctionPass {
2306 HexagonLoopRescheduling() : MachineFunctionPass(ID),
2307 HII(0), HRI(0), MRI(0), BTP(0) {
2308 initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry());
2311 bool runOnMachineFunction(MachineFunction &MF) override;
2314 const HexagonInstrInfo *HII;
2315 const HexagonRegisterInfo *HRI;
2316 MachineRegisterInfo *MRI;
// A candidate loop: loop block LB, preheader PB, exit block EB.
2320 LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb,
2321 MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {}
2322 MachineBasicBlock *LB, *PB, *EB;
2324 typedef std::vector<MachineInstr*> InstrList;
// A group of instructions computing Out from Inp that may be moved.
2326 BitTracker::RegisterRef Inp, Out;
// Decompose a PHI in block B into its loop-carried (LR) and preheader (PR)
// incoming values.
2330 PhiInfo(MachineInstr &P, MachineBasicBlock &B);
2332 BitTracker::RegisterRef LR, PR;
2333 MachineBasicBlock *LB, *PB;
// Helpers; see the definitions below for their contracts.
2336 static unsigned getDefReg(const MachineInstr *MI);
2337 bool isConst(unsigned Reg) const;
2338 bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const;
2339 bool isStoreInput(const MachineInstr *MI, unsigned DefR) const;
2340 bool isShuffleOf(unsigned OutR, unsigned InpR) const;
2341 bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2,
2342 unsigned &InpR2) const;
2343 void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB,
2344 MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR);
2345 bool processLoop(LoopCand &C);
// Pass identification and registration boilerplate.
2349 char HexagonLoopRescheduling::ID = 0;
2351 INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched",
2352 "Hexagon Loop Rescheduling", false, false)
// Split a PHI's incoming values by predecessor: the value coming from the
// loop block B itself goes to LR (loop-carried), any other goes to PR
// (preheader). PHI operands come in (value, block) pairs starting at index 1.
2355 HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P,
2356 MachineBasicBlock &B) {
2357 DefR = HexagonLoopRescheduling::getDefReg(&P);
2360 for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) {
2361 const MachineOperand &OpB = P.getOperand(i+1);
2362 if (OpB.getMBB() == &B) {
2363 LR = P.getOperand(i);
2367 PR = P.getOperand(i);
// Return the single register defined by MI, or (on an elided line) a
// failure value when MI does not define exactly one register.
2372 unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) {
2374 HBS::getInstrDefs(*MI, Defs);
2375 if (Defs.count() != 1)
2377 return Defs.find_first();
// True if every bit of Reg's tracked cell is a known constant (0 or 1).
2381 bool HexagonLoopRescheduling::isConst(unsigned Reg) const {
2384 const BitTracker::RegisterCell &RC = BTP->lookup(Reg);
2385 for (unsigned i = 0, w = RC.width(); i < w; ++i) {
2386 const BitTracker::BitValue &V = RC[i];
2387 if (!V.is(0) && !V.is(1))
// True if MI is one of the opcodes that merely rearrange/select bits of
// their inputs (shifts, inserts, logical ops, combines) — i.e. instructions
// that the bit simplification can reason about and potentially fold.
2394 bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI,
2395 unsigned DefR) const {
2396 unsigned Opc = MI->getOpcode();
2398 case TargetOpcode::COPY:
2399 case Hexagon::S2_lsr_i_r:
2400 case Hexagon::S2_asr_i_r:
2401 case Hexagon::S2_asl_i_r:
2402 case Hexagon::S2_lsr_i_p:
2403 case Hexagon::S2_asr_i_p:
2404 case Hexagon::S2_asl_i_p:
2405 case Hexagon::S2_insert:
2406 case Hexagon::A2_or:
2407 case Hexagon::A2_orp:
2408 case Hexagon::A2_and:
2409 case Hexagon::A2_andp:
2410 case Hexagon::A2_combinew:
2411 case Hexagon::A4_combineri:
2412 case Hexagon::A4_combineir:
2413 case Hexagon::A2_combineii:
2414 case Hexagon::A4_combineii:
2415 case Hexagon::A2_combine_ll:
2416 case Hexagon::A2_combine_lh:
2417 case Hexagon::A2_combine_hl:
2418 case Hexagon::A2_combine_hh:
// True if register InpR appears as an operand of (store) instruction MI,
// i.e. MI consumes InpR directly.
2425 bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI,
2426 unsigned InpR) const {
2427 for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
2428 const MachineOperand &Op = MI->getOperand(i);
2431 if (Op.getReg() == InpR)
// True if every bit of OutR is either a known constant or a bit taken from
// InpR — i.e. OutR is purely a bit-rearrangement of InpR.
2438 bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const {
2439 if (!BTP->has(OutR) || !BTP->has(InpR))
2441 const BitTracker::RegisterCell &OutC = BTP->lookup(OutR);
2442 for (unsigned i = 0, w = OutC.width(); i < w; ++i) {
2443 const BitTracker::BitValue &V = OutC[i];
2444 if (V.Type != BitTracker::BitValue::Ref)
2446 if (V.RefI.Reg != InpR)
// Check whether OutR2 is the same bit-shuffle of some register that OutR1
// is of InpR1; if so, the discovered input register (MatchR) is presumably
// written to the out-parameter InpR2 on the elided success path. The two
// output cells must have equal widths, bits must match type/position
// pairwise, and every reference bit of OutC2 must point at one consistent
// register (MatchR). Failure returns are elided throughout.
2453 bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1,
2454       unsigned OutR2, unsigned &InpR2) const {
2455   if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2))
2457   const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1);
2458   const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2);
2459   unsigned W = OutC1.width();
  // MatchR accumulates the single register all of OutC2's refs must name.
2460   unsigned MatchR = 0;
2461   if (W != OutC2.width())
2463   for (unsigned i = 0; i < W; ++i) {
2464     const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i];
    // Bit kinds must agree; constant-one bits are rejected outright
    // (NOTE(review): the reason constant 1s are excluded is not visible
    // here — confirm against the full source).
2465     if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One)
2467     if (V1.Type != BitTracker::BitValue::Ref)
    // Reference bits must come from the same bit position...
2469     if (V1.RefI.Pos != V2.RefI.Pos)
    // ...and OutR1's refs must all be into InpR1.
2471     if (V1.RefI.Reg != InpR1)
    // OutR2's refs may not be undefined or self-references.
2473     if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2)
    // First ref seen fixes MatchR; later refs must agree with it.
2476       MatchR = V2.RefI.Reg;
2477     else if (V2.RefI.Reg != MatchR)
// Move instruction group G into loop block LB at position At. A fresh PHI
// is created to merge NewPredR (value from preheader PB) with the
// loop-carried value (the PHI's incoming operands themselves are on lines
// elided here). Each instruction of the group is then cloned in reverse
// order of G.Ins with a new destination register, rewriting register uses
// through RegMap. Finally all uses of OldPhiR are replaced with the clone
// of the group's output register.
2485 void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
2486       MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR,
2487       unsigned NewPredR) {
  // Maps original virtual registers to their newly created clones.
2488   DenseMap<unsigned,unsigned> RegMap;
2490   const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR);
2491   unsigned PhiR = MRI->createVirtualRegister(PhiRC);
  // New PHI in LB; its incoming (value, block) operands are added on
  // lines not visible in this excerpt.
2492   BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR)
  // The group's single input register is now fed by the new PHI.
2497   RegMap.insert(std::make_pair(G.Inp.Reg, PhiR));
  // Clone group members back-to-front (G.Ins is presumably stored in
  // reverse dependency order — confirm against the full source).
2499   for (unsigned i = G.Ins.size(); i > 0; --i) {
2500     const MachineInstr *SI = G.Ins[i-1];
2501     unsigned DR = getDefReg(SI);
2502     const TargetRegisterClass *RC = MRI->getRegClass(DR);
2503     unsigned NewDR = MRI->createVirtualRegister(RC);
2504     DebugLoc DL = SI->getDebugLoc();
2506     auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR);
    // Copy SI's operands, remapping register uses; handling of
    // non-register and def operands is elided here.
2507     for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) {
2508       const MachineOperand &Op = SI->getOperand(j);
2515       unsigned UseR = RegMap[Op.getReg()];
2516       MIB.addReg(UseR, 0, Op.getSubReg());
2518     RegMap.insert(std::make_pair(DR, NewDR));
  // Redirect consumers of the old PHI result to the cloned group output.
2521   HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI);
// Process one loop candidate C: find PHIs whose value is only consumed by
// bit-shuffling (or store-input) instructions, collect the loop's
// bit-shuffling instructions into dependency groups, and move each
// movable group to the front of the loop via moveGroup. Returns whether
// the function was changed. Many interior lines (early continues/breaks,
// returns) are elided in this excerpt.
2525 bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
2526   DEBUG(dbgs() << "Processing loop in BB#" << C.LB->getNumber() << "\n");
2527   std::vector<PhiInfo> Phis;
  // Pass 1: collect candidate PHIs in the loop block. A PHI qualifies only
  // if it has at least one "good" use (bit shuffle or store input) and no
  // "bad" use; the conditions setting BadUse/GoodUse are partly elided.
2528   for (auto &I : *C.LB) {
2531     unsigned PR = getDefReg(&I);
2534     bool BadUse = false, GoodUse = false;
2535     for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) {
2536       MachineInstr *UseI = UI->getParent();
2537       if (UseI->getParent() != C.LB) {
2541       if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR))
2544     if (BadUse || !GoodUse)
2547     Phis.push_back(PhiInfo(I, *C.LB));
  // Debug dump of the collected PHIs (inside a DEBUG block whose opening
  // line is elided).
2551     dbgs() << "Phis: {";
2552     for (auto &I : Phis) {
2553       dbgs() << ' ' << PrintReg(I.DefR, HRI) << "=phi("
2554           << PrintReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber()
2555           << ',' << PrintReg(I.LR.Reg, HRI, I.LR.Sub) << ":b"
2556           << I.LB->getNumber() << ')';
2564   bool Changed = false;
  // Pass 2 (backwards over the loop block): gather bit-shuffling
  // instructions whose result is used only by other collected shuffles,
  // by a back-edge PHI operand, or outside the loop (requiring an epilog
  // block C.EB for a copy-out).
2567   // Go backwards in the block: for each bit shuffling instruction, check
2568   // if that instruction could potentially be moved to the front of the loop:
2569   // the output of the loop cannot be used in a non-shuffling instruction
2571   for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) {
2572     if (I->isTerminator())
2578     HBS::getInstrDefs(*I, Defs);
    // Only single-def virtual-register instructions are candidates.
2579     if (Defs.count() != 1)
2581     unsigned DefR = Defs.find_first();
2582     if (!TargetRegisterInfo::isVirtualRegister(DefR))
2584     if (!isBitShuffle(&*I, DefR))
2587     bool BadUse = false;
2588     for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) {
2589       MachineInstr *UseI = UI->getParent();
2590       if (UseI->getParent() == C.LB) {
2591         if (UseI->isPHI()) {
2592           // If the use is in a phi node in this loop, then it should be
2593           // the value corresponding to the back edge.
2594           unsigned Idx = UI.getOperandNo();
2595           if (UseI->getOperand(Idx+1).getMBB() != C.LB)
          // In-loop non-PHI users must already be collected shuffles.
2598           auto F = std::find(ShufIns.begin(), ShufIns.end(), UseI);
2599           if (F == ShufIns.end())
2603         // There is a use outside of the loop, but there is no epilog block
2604         // suitable for a copy-out.
2605         if (C.EB == nullptr)
2614     ShufIns.push_back(&*I);
2617   // Partition the list of shuffling instructions into instruction groups,
2618   // where each group has to be moved as a whole (i.e. a group is a chain of
2619   // dependent instructions). A group produces a single live output register,
2620   // which is meant to be the input of the loop phi node (although this is
2621   // not checked here yet). It also uses a single register as its input,
2622   // which is some value produced in the loop body. After moving the group
2623   // to the beginning of the loop, that input register would need to be
2624   // the loop-carried register (through a phi node) instead of the (currently
2625   // loop-carried) output register.
2626   typedef std::vector<InstrGroup> InstrGroupList;
2627   InstrGroupList Groups;
  // Greedy grouping: seed a group with ShufIns[i], then absorb later
  // entries that define any of the group's still-pending inputs. Absorbed
  // entries are nulled out in ShufIns so they are not reused as seeds.
2629   for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) {
2630     MachineInstr *SI = ShufIns[i];
2635     G.Ins.push_back(SI);
2636     G.Out.Reg = getDefReg(SI);
2638     HBS::getInstrUses(*SI, Inputs);
2640     for (unsigned j = i+1; j < n; ++j) {
2641       MachineInstr *MI = ShufIns[j];
2645       HBS::getInstrDefs(*MI, Defs);
2646       // If this instruction does not define any pending inputs, skip it.
2647       if (!Defs.intersects(Inputs))
2649       // Otherwise, add it to the current group and remove the inputs that
2650       // are defined by MI.
2651       G.Ins.push_back(MI);
2652       Inputs.remove(Defs);
2653       // Then add all registers used by MI.
2654       HBS::getInstrUses(*MI, Inputs);
2655       ShufIns[j] = nullptr;
2658     // Only add a group if it requires at most one register.
2659     if (Inputs.count() > 1)
    // The group's output must feed a collected PHI's back-edge operand.
2661     auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
2662       return G.Out.Reg == P.LR.Reg;
2664     if (std::find_if(Phis.begin(), Phis.end(), LoopInpEq) == Phis.end())
2667     G.Inp.Reg = Inputs.find_first();
2668     Groups.push_back(G);
  // Debug dump of the formed groups (DEBUG wrapper elided).
2672   for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
2673     InstrGroup &G = Groups[i];
2674     dbgs() << "Group[" << i << "] inp: "
2675         << PrintReg(G.Inp.Reg, HRI, G.Inp.Sub)
2676         << " out: " << PrintReg(G.Out.Reg, HRI, G.Out.Sub) << "\n";
2677     for (unsigned j = 0, m = G.Ins.size(); j < m; ++j)
2678       dbgs() << "  " << *G.Ins[j];
  // Pass 3: for each group whose output is a shuffle of its input, find
  // the matching PHI and a predecessor-side register PredR equivalent to
  // the group's input; materialize a zero transfer in the preheader when
  // the PHI's preheader value is a zero immediate of a different register
  // class. Then move the group.
2682   for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
2683     InstrGroup &G = Groups[i];
2684     if (!isShuffleOf(G.Out.Reg, G.Inp.Reg))
2686     auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
2687       return G.Out.Reg == P.LR.Reg;
2689     auto F = std::find_if(Phis.begin(), Phis.end(), LoopInpEq);
2690     if (F == Phis.end())
    // No structurally identical shuffle on the preheader side: fall back
    // to the special case where the preheader value is constant zero
    // (A2_tfrsi/A2_tfrpi with immediate 0).
2693     if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PredR)) {
2694       const MachineInstr *DefPredR = MRI->getVRegDef(F->PR.Reg);
2695       unsigned Opc = DefPredR->getOpcode();
2696       if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi)
2698       if (!DefPredR->getOperand(1).isImm())
2700       if (DefPredR->getOperand(1).getImm() != 0)
      // Zero is register-class agnostic: re-materialize it in the class
      // of the group's input if the classes differ.
2702       const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg);
2703       if (RC != MRI->getRegClass(F->PR.Reg)) {
2704         PredR = MRI->createVirtualRegister(RC);
2705         unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi
2706                                                           : Hexagon::A2_tfrpi;
2707         auto T = C.PB->getFirstTerminator();
2708         DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc();
2709         BuildMI(*C.PB, T, DL, HII->get(TfrI), PredR)
2715     assert(MRI->getRegClass(PredR) == MRI->getRegClass(G.Inp.Reg));
2716     moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PredR);
// Pass entry point. Sets up target info and a BitTracker over the
// function, scans for single-block loop candidates (blocks with exactly
// two predecessors and two successors — the self-loop detection lines are
// elided here), then runs processLoop on each candidate. Returns whether
// anything changed.
2724 bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
  // Honor -opt-bisect / optnone skipping.
2725   if (skipFunction(*MF.getFunction()))
2728   auto &HST = MF.getSubtarget<HexagonSubtarget>();
2729   HII = HST.getInstrInfo();
2730   HRI = HST.getRegisterInfo();
2731   MRI = &MF.getRegInfo();
  // Run the bit tracker so per-register bit cells (used by isConst,
  // isShuffleOf, isSameShuffle) are available.
2732   const HexagonEvaluator HE(*HRI, *MRI, *HII, MF);
2733   BitTracker BT(HE, MF);
2734   DEBUG(BT.trace(true));
2738   std::vector<LoopCand> Cand;
2740   for (auto &B : MF) {
    // Candidate shape: exactly two predecessors (preheader + back edge)
    // and two successors (loop + exit).
2741     if (B.pred_size() != 2 || B.succ_size() != 2)
2743     MachineBasicBlock *PB = nullptr;
2744     bool IsLoop = false;
    // Identify the non-loop predecessor PB; loop-detection body elided.
2745     for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) {
2754     MachineBasicBlock *EB = nullptr;
2755     for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) {
2758       // Set EP to the epilog block, if it has only 1 predecessor (i.e. the
2759       // edge from B to EP is non-critical.
2760       if ((*SI)->pred_size() == 1)
2765     Cand.push_back(LoopCand(&B, PB, EB));
2768   bool Changed = false;
2769   for (auto &C : Cand)
2770     Changed |= processLoop(C);
2775 //===----------------------------------------------------------------------===//
2776 // Public Constructor Functions
2777 //===----------------------------------------------------------------------===//
// Factory: create a new HexagonLoopRescheduling pass instance for the
// Hexagon target pass pipeline.
2779 FunctionPass *llvm::createHexagonLoopRescheduling() {
2780   return new HexagonLoopRescheduling();
2783 FunctionPass *llvm::createHexagonBitSimplify() {
2784 return new HexagonBitSimplify();