1 //===- HexagonBitSimplify.cpp ---------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "BitTracker.h"
11 #include "HexagonBitTracker.h"
12 #include "HexagonInstrInfo.h"
13 #include "HexagonRegisterInfo.h"
14 #include "HexagonSubtarget.h"
15 #include "llvm/ADT/BitVector.h"
16 #include "llvm/ADT/DenseMap.h"
17 #include "llvm/ADT/GraphTraits.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringRef.h"
21 #include "llvm/CodeGen/MachineBasicBlock.h"
22 #include "llvm/CodeGen/MachineDominators.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineOperand.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetRegisterInfo.h"
30 #include "llvm/IR/DebugLoc.h"
31 #include "llvm/MC/MCInstrDesc.h"
32 #include "llvm/Pass.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Compiler.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/MathExtras.h"
38 #include "llvm/Support/raw_ostream.h"
47 #define DEBUG_TYPE "hexbit"
51 static cl::opt<bool> PreserveTiedOps("hexbit-keep-tied", cl::Hidden,
52 cl::init(true), cl::desc("Preserve subregisters in tied operands"));
53 static cl::opt<bool> GenExtract("hexbit-extract", cl::Hidden,
54 cl::init(true), cl::desc("Generate extract instructions"));
55 static cl::opt<bool> GenBitSplit("hexbit-bitsplit", cl::Hidden,
56 cl::init(true), cl::desc("Generate bitsplit instructions"));
58 static cl::opt<unsigned> MaxExtract("hexbit-max-extract", cl::Hidden,
59 cl::init(std::numeric_limits<unsigned>::max()));
60 static unsigned CountExtract = 0;
61 static cl::opt<unsigned> MaxBitSplit("hexbit-max-bitsplit", cl::Hidden,
62 cl::init(std::numeric_limits<unsigned>::max()));
63 static unsigned CountBitSplit = 0;
67 void initializeHexagonBitSimplifyPass(PassRegistry& Registry);
68 FunctionPass *createHexagonBitSimplify();
70 } // end namespace llvm
74 // Set of virtual registers, based on BitVector.
// A set of *virtual* registers implemented on top of llvm::BitVector.
// Virtual register numbers are mapped to dense bit indices via
// TargetRegisterInfo::virtReg2Index (see v2x/x2v below), so the vector
// stays compact. Private inheritance hides the raw BitVector interface;
// selected operations are re-exported or wrapped with reg<->index
// translation.
75 struct RegisterSet : private BitVector {
76 RegisterSet() = default;
77 explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
78 RegisterSet(const RegisterSet &RS) = default;
80 using BitVector::clear;
81 using BitVector::count;
// Iteration helpers: translate BitVector's int-based find results back
// into virtual register numbers (continuation of each body not shown in
// this view — presumably converts -1/"not found" to 0; confirm in full
// source).
83 unsigned find_first() const {
84 int First = BitVector::find_first();
90 unsigned find_next(unsigned Prev) const {
91 int Next = BitVector::find_next(v2x(Prev));
// Insert/remove a single virtual register. The casts are safe because
// BitVector::set/reset return *this and RegisterSet adds no state.
97 RegisterSet &insert(unsigned R) {
98 unsigned Idx = v2x(R);
100 return static_cast<RegisterSet&>(BitVector::set(Idx));
102 RegisterSet &remove(unsigned R) {
103 unsigned Idx = v2x(R);
106 return static_cast<RegisterSet&>(BitVector::reset(Idx));
// Set union (insert) and set difference (remove) with another set.
109 RegisterSet &insert(const RegisterSet &Rs) {
110 return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
112 RegisterSet &remove(const RegisterSet &Rs) {
113 return static_cast<RegisterSet&>(BitVector::reset(Rs));
// Element access by virtual register number (mutable proxy / const bool).
116 reference operator[](unsigned R) {
117 unsigned Idx = v2x(R);
119 return BitVector::operator[](Idx);
121 bool operator[](unsigned R) const {
122 unsigned Idx = v2x(R);
123 assert(Idx < size());
124 return BitVector::operator[](Idx);
// Membership test; unlike operator[], likely tolerant of out-of-range
// indices (guard lines not visible here — confirm in full source).
126 bool has(unsigned R) const {
127 unsigned Idx = v2x(R);
130 return BitVector::test(Idx);
134 return !BitVector::any();
// Subset test: this includes Rs iff Rs - this == empty set.
136 bool includes(const RegisterSet &Rs) const {
137 // A.BitVector::test(B) <=> A-B != {}
138 return !Rs.BitVector::test(*this);
140 bool intersects(const RegisterSet &Rs) const {
141 return BitVector::anyCommon(Rs);
// Grow the vector so index Idx is addressable; minimum capacity is 32.
145 void ensure(unsigned Idx) {
147 resize(std::max(Idx+1, 32U));
// Mapping between virtual register numbers (v) and dense indices (x).
150 static inline unsigned v2x(unsigned v) {
151 return TargetRegisterInfo::virtReg2Index(v);
154 static inline unsigned x2v(unsigned x) {
155 return TargetRegisterInfo::index2VirtReg(x);
160 PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
163 friend raw_ostream &operator<< (raw_ostream &OS,
164 const PrintRegSet &P);
167 const RegisterSet &RS;
168 const TargetRegisterInfo *TRI;
171 raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
172 LLVM_ATTRIBUTE_UNUSED;
173 raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
175 for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
176 OS << ' ' << printReg(R, P.TRI);
181 class Transformation;
// Machine-function pass that simplifies instructions based on bit-level
// value tracking (BitTracker). It requires and preserves the machine
// dominator tree, which it uses to traverse blocks (see visitBlock).
// The static members are shared utility predicates/transforms used by
// the transformation classes defined later in this file.
183 class HexagonBitSimplify : public MachineFunctionPass {
187 HexagonBitSimplify() : MachineFunctionPass(ID) {}
189 StringRef getPassName() const override {
190 return "Hexagon bit simplification";
193 void getAnalysisUsage(AnalysisUsage &AU) const override {
194 AU.addRequired<MachineDominatorTree>();
195 AU.addPreserved<MachineDominatorTree>();
196 MachineFunctionPass::getAnalysisUsage(AU);
199 bool runOnMachineFunction(MachineFunction &MF) override;
// Collect virtual-register defs/uses of MI into a RegisterSet.
201 static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
202 static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
// Bit-cell queries: equality of bit ranges, all-zero range, and
// extraction of a constant value from a range (see definitions below).
203 static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
204 const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
205 static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
207 static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
208 uint16_t W, uint64_t &U);
// Use-rewriting helpers: replace all uses of OldR (optionally only
// those with subregister OldSR) with NewR[:NewSR].
209 static bool replaceReg(unsigned OldR, unsigned NewR,
210 MachineRegisterInfo &MRI);
211 static bool getSubregMask(const BitTracker::RegisterRef &RR,
212 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
213 static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
214 unsigned NewSR, MachineRegisterInfo &MRI);
215 static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
216 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
217 static bool parseRegSequence(const MachineInstr &I,
218 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
219 const MachineRegisterInfo &MRI);
// Used-bit computation for stores and for opcode/operand pairs whose
// used bits do not depend on other operand values.
221 static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
223 static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
224 uint16_t Begin, const HexagonInstrInfo &HII);
226 static const TargetRegisterClass *getFinalVRegClass(
227 const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
228 static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
229 const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);
// Dominator tree cached from getAnalysis in runOnMachineFunction.
232 MachineDominatorTree *MDT = nullptr;
234 bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
// True if Reg has a tied use whose subregister differs from NewSub;
// used to avoid breaking tied operands when PreserveTiedOps is set.
235 static bool hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
236 unsigned NewSub = Hexagon::NoSubRegister);
239 using HBS = HexagonBitSimplify;
241 // The purpose of this class is to provide a common facility to traverse
242 // the function top-down or bottom-up via the dominator tree, and keep
243 // track of the available registers.
// Abstract base for the per-block transformations in this file. TopDown
// (set by the constructor argument) selects whether visitBlock processes
// a block before or after its dominator-tree children.
244 class Transformation {
248 Transformation(bool TD) : TopDown(TD) {}
249 virtual ~Transformation() = default;
// Process one basic block given the set of available registers AVs;
// returns true if the block was changed.
251 virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
254 } // end anonymous namespace
256 char HexagonBitSimplify::ID = 0;
258 INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexagon-bit-simplify",
259 "Hexagon bit simplification", false, false)
260 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
261 INITIALIZE_PASS_END(HexagonBitSimplify, "hexagon-bit-simplify",
262 "Hexagon bit simplification", false, false)
// Recursively visit B and its dominator-tree children, threading the set
// of available (defined-so-far) registers AVs through the traversal.
// For a top-down transformation the block is processed before recursing
// (first processBlock call); defs of B's instructions are accumulated
// into NewAVs for the children. The trailing processBlock call at
// content line 281 presumably handles the bottom-up (!TopDown) case —
// guard lines are not visible here; confirm in full source.
264 bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
266 bool Changed = false;
269 Changed = T.processBlock(B, AVs);
273 getInstrDefs(I, Defs);
274 RegisterSet NewAVs = AVs;
277 for (auto *DTN : children<MachineDomTreeNode*>(MDT->getNode(&B)))
278 Changed |= visitBlock(*(DTN->getBlock()), T, NewAVs);
281 Changed |= T.processBlock(B, AVs);
287 // Utility functions:
// Collect into Defs every virtual register defined by MI. Physical
// registers and non-register/non-def operands are ignored.
289 void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
291 for (auto &Op : MI.operands()) {
292 if (!Op.isReg() || !Op.isDef())
294 unsigned R = Op.getReg();
295 if (!TargetRegisterInfo::isVirtualRegister(R))
// Collect into Uses every virtual register used by MI. Mirrors
// getInstrDefs, filtering on use operands instead of defs.
301 void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
303 for (auto &Op : MI.operands()) {
304 if (!Op.isReg() || !Op.isUse())
306 unsigned R = Op.getReg();
307 if (!TargetRegisterInfo::isVirtualRegister(R))
313 // Check if all the bits in range [B, E) in both cells are equal.
// Compares W bits of RC1 starting at B1 against W bits of RC2 starting
// at B2. A "bottom" bit (a self-reference with Reg == 0) cannot be
// proven equal to anything, so the comparison presumably fails for it
// (the return statements fall in lines not visible here).
314 bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1,
315 uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2,
317 for (uint16_t i = 0; i < W; ++i) {
318 // If RC1[i] is "bottom", it cannot be proven equal to RC2[i].
319 if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0)
322 if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0)
324 if (RC1[B1+i] != RC2[B2+i])
// Check whether the W bits of cell RC starting at bit B are all known
// to be zero (loop body / returns fall in lines not visible here).
330 bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC,
331 uint16_t B, uint16_t W) {
332 assert(B < RC.width() && B+W <= RC.width());
333 for (uint16_t i = B; i < B+W; ++i)
// If the W bits of RC starting at B form a known constant, store that
// value in U and return true. The loop walks from the most-significant
// bit (B+W-1) down to B, accumulating bits (accumulation statements are
// in lines not visible here).
339 bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC,
340 uint16_t B, uint16_t W, uint64_t &U) {
341 assert(B < RC.width() && B+W <= RC.width());
343 for (uint16_t i = B+W; i > B; --i) {
344 const BitTracker::BitValue &BV = RC[i-1];
// Replace all uses of virtual register OldR with NewR. Fails (returns
// false) when either register is physical. NextI is captured before the
// operand is rewritten because rewriting invalidates the current use
// iterator.
355 bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR,
356 MachineRegisterInfo &MRI) {
357 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
358 !TargetRegisterInfo::isVirtualRegister(NewR))
360 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
362 for (auto I = Begin; I != End; I = NextI) {
363 NextI = std::next(I);
// Replace all uses of OldR with the subregister NewR:NewSR. Bails out
// for physical registers and, via hasTiedUse, when OldR has a tied use
// that the subregister rewrite would break (respects PreserveTiedOps).
369 bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR,
370 unsigned NewSR, MachineRegisterInfo &MRI) {
371 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
372 !TargetRegisterInfo::isVirtualRegister(NewR))
374 if (hasTiedUse(OldR, MRI, NewSR))
376 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
378 for (auto I = Begin; I != End; I = NextI) {
379 NextI = std::next(I);
// Replace uses of OldR:OldSR with NewR:NewSR. Only uses whose
// subregister index matches OldSR are rewritten (the `continue` branch
// skips others). The tied-use check is needed only when the subregister
// index actually changes.
386 bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR,
387 unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) {
388 if (!TargetRegisterInfo::isVirtualRegister(OldR) ||
389 !TargetRegisterInfo::isVirtualRegister(NewR))
391 if (OldSR != NewSR && hasTiedUse(OldR, MRI, NewSR))
393 auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
395 for (auto I = Begin; I != End; I = NextI) {
396 NextI = std::next(I);
397 if (I->getSubReg() != OldSR)
405 // For a register ref (pair Reg:Sub), set Begin to the position of the LSB
406 // of Sub in Reg, and set Width to the size of Sub in bits. Return true,
407 // if this succeeded, otherwise return false.
// With no subregister the whole register is covered (Width = full size).
// For double/HVX-pair classes, Width is half the pair; for *_hi subregs
// Begin presumably becomes Width (assignment falls in lines not visible
// here — confirm in full source).
408 bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
409 unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) {
410 const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg);
413 Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC);
419 switch (RC->getID()) {
420 case Hexagon::DoubleRegsRegClassID:
421 case Hexagon::HvxWRRegClassID:
422 Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 2;
423 if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi)
433 // For a REG_SEQUENCE, set SL to the low subregister and SH to the high
// subregister, regardless of the operand order in the instruction.
// REG_SEQUENCE operand layout: (dst, src1, subidx1, src2, subidx2).
435 bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I,
436 BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
437 const MachineRegisterInfo &MRI) {
438 assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE);
439 unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm();
440 auto &DstRC = *MRI.getRegClass(I.getOperand(0).getReg());
441 auto &HRI = static_cast<const HexagonRegisterInfo&>(
442 *MRI.getTargetRegisterInfo());
// Resolve the lo/hi pseudo subregister indices for this register class
// (works for both scalar pairs and HVX vector pairs).
443 unsigned SubLo = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_lo);
444 unsigned SubHi = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_hi);
// Exactly one of the two orderings must hold.
445 assert((Sub1 == SubLo && Sub2 == SubHi) || (Sub1 == SubHi && Sub2 == SubLo));
446 if (Sub1 == SubLo && Sub2 == SubHi) {
447 SL = I.getOperand(1);
448 SH = I.getOperand(3);
451 if (Sub1 == SubHi && Sub2 == SubLo) {
452 SH = I.getOperand(1);
453 SL = I.getOperand(3);
459 // All stores (except 64-bit stores) take a 32-bit register as the source
460 // of the value to be stored. If the instruction stores into a location
461 // that is shorter than 32 bits, some bits of the source register are not
462 // used. For each store instruction, calculate the set of used bits in
463 // the source register, and set appropriate bits in Bits. Return true if
464 // the bits are calculated, false otherwise.
465 bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
467 using namespace Hexagon;
471 case S2_storerb_io: // memb(Rs32+#s11:0)=Rt32
472 case S2_storerbnew_io: // memb(Rs32+#s11:0)=Nt8.new
473 case S2_pstorerbt_io: // if (Pv4) memb(Rs32+#u6:0)=Rt32
474 case S2_pstorerbf_io: // if (!Pv4) memb(Rs32+#u6:0)=Rt32
475 case S4_pstorerbtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
476 case S4_pstorerbfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
477 case S2_pstorerbnewt_io: // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
478 case S2_pstorerbnewf_io: // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
479 case S4_pstorerbnewtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
480 case S4_pstorerbnewfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
481 case S2_storerb_pi: // memb(Rx32++#s4:0)=Rt32
482 case S2_storerbnew_pi: // memb(Rx32++#s4:0)=Nt8.new
483 case S2_pstorerbt_pi: // if (Pv4) memb(Rx32++#s4:0)=Rt32
484 case S2_pstorerbf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Rt32
485 case S2_pstorerbtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
486 case S2_pstorerbfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
487 case S2_pstorerbnewt_pi: // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
488 case S2_pstorerbnewf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
489 case S2_pstorerbnewtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
490 case S2_pstorerbnewfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
491 case S4_storerb_ap: // memb(Re32=#U6)=Rt32
492 case S4_storerbnew_ap: // memb(Re32=#U6)=Nt8.new
493 case S2_storerb_pr: // memb(Rx32++Mu2)=Rt32
494 case S2_storerbnew_pr: // memb(Rx32++Mu2)=Nt8.new
495 case S4_storerb_ur: // memb(Ru32<<#u2+#U6)=Rt32
496 case S4_storerbnew_ur: // memb(Ru32<<#u2+#U6)=Nt8.new
497 case S2_storerb_pbr: // memb(Rx32++Mu2:brev)=Rt32
498 case S2_storerbnew_pbr: // memb(Rx32++Mu2:brev)=Nt8.new
499 case S2_storerb_pci: // memb(Rx32++#s4:0:circ(Mu2))=Rt32
500 case S2_storerbnew_pci: // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
501 case S2_storerb_pcr: // memb(Rx32++I:circ(Mu2))=Rt32
502 case S2_storerbnew_pcr: // memb(Rx32++I:circ(Mu2))=Nt8.new
503 case S4_storerb_rr: // memb(Rs32+Ru32<<#u2)=Rt32
504 case S4_storerbnew_rr: // memb(Rs32+Ru32<<#u2)=Nt8.new
505 case S4_pstorerbt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
506 case S4_pstorerbf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
507 case S4_pstorerbtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
508 case S4_pstorerbfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
509 case S4_pstorerbnewt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
510 case S4_pstorerbnewf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
511 case S4_pstorerbnewtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
512 case S4_pstorerbnewfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
513 case S2_storerbgp: // memb(gp+#u16:0)=Rt32
514 case S2_storerbnewgp: // memb(gp+#u16:0)=Nt8.new
515 case S4_pstorerbt_abs: // if (Pv4) memb(#u6)=Rt32
516 case S4_pstorerbf_abs: // if (!Pv4) memb(#u6)=Rt32
517 case S4_pstorerbtnew_abs: // if (Pv4.new) memb(#u6)=Rt32
518 case S4_pstorerbfnew_abs: // if (!Pv4.new) memb(#u6)=Rt32
519 case S4_pstorerbnewt_abs: // if (Pv4) memb(#u6)=Nt8.new
520 case S4_pstorerbnewf_abs: // if (!Pv4) memb(#u6)=Nt8.new
521 case S4_pstorerbnewtnew_abs: // if (Pv4.new) memb(#u6)=Nt8.new
522 case S4_pstorerbnewfnew_abs: // if (!Pv4.new) memb(#u6)=Nt8.new
523 Bits.set(Begin, Begin+8);
527 case S2_storerh_io: // memh(Rs32+#s11:1)=Rt32
528 case S2_storerhnew_io: // memh(Rs32+#s11:1)=Nt8.new
529 case S2_pstorerht_io: // if (Pv4) memh(Rs32+#u6:1)=Rt32
530 case S2_pstorerhf_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt32
531 case S4_pstorerhtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
532 case S4_pstorerhfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
533 case S2_pstorerhnewt_io: // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
534 case S2_pstorerhnewf_io: // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
535 case S4_pstorerhnewtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
536 case S4_pstorerhnewfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
537 case S2_storerh_pi: // memh(Rx32++#s4:1)=Rt32
538 case S2_storerhnew_pi: // memh(Rx32++#s4:1)=Nt8.new
539 case S2_pstorerht_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt32
540 case S2_pstorerhf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt32
541 case S2_pstorerhtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
542 case S2_pstorerhfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
543 case S2_pstorerhnewt_pi: // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
544 case S2_pstorerhnewf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
545 case S2_pstorerhnewtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
546 case S2_pstorerhnewfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
547 case S4_storerh_ap: // memh(Re32=#U6)=Rt32
548 case S4_storerhnew_ap: // memh(Re32=#U6)=Nt8.new
549 case S2_storerh_pr: // memh(Rx32++Mu2)=Rt32
550 case S2_storerhnew_pr: // memh(Rx32++Mu2)=Nt8.new
551 case S4_storerh_ur: // memh(Ru32<<#u2+#U6)=Rt32
552 case S4_storerhnew_ur: // memh(Ru32<<#u2+#U6)=Nt8.new
553 case S2_storerh_pbr: // memh(Rx32++Mu2:brev)=Rt32
554 case S2_storerhnew_pbr: // memh(Rx32++Mu2:brev)=Nt8.new
555 case S2_storerh_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt32
556 case S2_storerhnew_pci: // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
557 case S2_storerh_pcr: // memh(Rx32++I:circ(Mu2))=Rt32
558 case S2_storerhnew_pcr: // memh(Rx32++I:circ(Mu2))=Nt8.new
559 case S4_storerh_rr: // memh(Rs32+Ru32<<#u2)=Rt32
560 case S4_pstorerht_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
561 case S4_pstorerhf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
562 case S4_pstorerhtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
563 case S4_pstorerhfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
564 case S4_storerhnew_rr: // memh(Rs32+Ru32<<#u2)=Nt8.new
565 case S4_pstorerhnewt_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
566 case S4_pstorerhnewf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
567 case S4_pstorerhnewtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
568 case S4_pstorerhnewfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
569 case S2_storerhgp: // memh(gp+#u16:1)=Rt32
570 case S2_storerhnewgp: // memh(gp+#u16:1)=Nt8.new
571 case S4_pstorerht_abs: // if (Pv4) memh(#u6)=Rt32
572 case S4_pstorerhf_abs: // if (!Pv4) memh(#u6)=Rt32
573 case S4_pstorerhtnew_abs: // if (Pv4.new) memh(#u6)=Rt32
574 case S4_pstorerhfnew_abs: // if (!Pv4.new) memh(#u6)=Rt32
575 case S4_pstorerhnewt_abs: // if (Pv4) memh(#u6)=Nt8.new
576 case S4_pstorerhnewf_abs: // if (!Pv4) memh(#u6)=Nt8.new
577 case S4_pstorerhnewtnew_abs: // if (Pv4.new) memh(#u6)=Nt8.new
578 case S4_pstorerhnewfnew_abs: // if (!Pv4.new) memh(#u6)=Nt8.new
579 Bits.set(Begin, Begin+16);
583 case S2_storerf_io: // memh(Rs32+#s11:1)=Rt.H32
584 case S2_pstorerft_io: // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
585 case S2_pstorerff_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
586 case S4_pstorerftnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
587 case S4_pstorerffnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
588 case S2_storerf_pi: // memh(Rx32++#s4:1)=Rt.H32
589 case S2_pstorerft_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
590 case S2_pstorerff_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
591 case S2_pstorerftnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
592 case S2_pstorerffnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
593 case S4_storerf_ap: // memh(Re32=#U6)=Rt.H32
594 case S2_storerf_pr: // memh(Rx32++Mu2)=Rt.H32
595 case S4_storerf_ur: // memh(Ru32<<#u2+#U6)=Rt.H32
596 case S2_storerf_pbr: // memh(Rx32++Mu2:brev)=Rt.H32
597 case S2_storerf_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
598 case S2_storerf_pcr: // memh(Rx32++I:circ(Mu2))=Rt.H32
599 case S4_storerf_rr: // memh(Rs32+Ru32<<#u2)=Rt.H32
600 case S4_pstorerft_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
601 case S4_pstorerff_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
602 case S4_pstorerftnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
603 case S4_pstorerffnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
604 case S2_storerfgp: // memh(gp+#u16:1)=Rt.H32
605 case S4_pstorerft_abs: // if (Pv4) memh(#u6)=Rt.H32
606 case S4_pstorerff_abs: // if (!Pv4) memh(#u6)=Rt.H32
607 case S4_pstorerftnew_abs: // if (Pv4.new) memh(#u6)=Rt.H32
608 case S4_pstorerffnew_abs: // if (!Pv4.new) memh(#u6)=Rt.H32
609 Bits.set(Begin+16, Begin+32);
616 // For an instruction with opcode Opc, calculate the set of bits that it
617 // uses in a register in operand OpN. This only calculates the set of used
618 // bits for cases where it does not depend on any operands (as is the case
619 // in shifts, for example). For concrete instructions from a program, the
620 // operand may be a subregister of a larger register, while Bits would
621 // correspond to the larger register in its entirety. Because of that,
622 // the parameter Begin can be used to indicate which bit of Bits should be
623 // considered the LSB of the operand.
624 bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
625 BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
626 using namespace Hexagon;
628 const MCInstrDesc &D = HII.get(Opc);
630 if (OpN == D.getNumOperands()-1)
631 return getUsedBitsInStore(Opc, Bits, Begin);
636 // One register source. Used bits: R1[0-7].
643 Bits.set(Begin, Begin+8);
648 // One register source. Used bits: R1[0-15].
656 Bits.set(Begin, Begin+16);
661 // One register source. Used bits: R1[16-31].
664 Bits.set(Begin+16, Begin+32);
669 // Two register sources. Used bits: R1[0-7], R2[0-7].
674 Bits.set(Begin, Begin+8);
679 // Two register sources. Used bits: R1[0-15], R2[0-15].
684 case A2_addh_h16_sat_ll:
686 case A2_addh_l16_sat_ll:
689 case A2_subh_h16_sat_ll:
691 case A2_subh_l16_sat_ll:
692 case M2_mpy_acc_ll_s0:
693 case M2_mpy_acc_ll_s1:
694 case M2_mpy_acc_sat_ll_s0:
695 case M2_mpy_acc_sat_ll_s1:
698 case M2_mpy_nac_ll_s0:
699 case M2_mpy_nac_ll_s1:
700 case M2_mpy_nac_sat_ll_s0:
701 case M2_mpy_nac_sat_ll_s1:
702 case M2_mpy_rnd_ll_s0:
703 case M2_mpy_rnd_ll_s1:
704 case M2_mpy_sat_ll_s0:
705 case M2_mpy_sat_ll_s1:
706 case M2_mpy_sat_rnd_ll_s0:
707 case M2_mpy_sat_rnd_ll_s1:
708 case M2_mpyd_acc_ll_s0:
709 case M2_mpyd_acc_ll_s1:
712 case M2_mpyd_nac_ll_s0:
713 case M2_mpyd_nac_ll_s1:
714 case M2_mpyd_rnd_ll_s0:
715 case M2_mpyd_rnd_ll_s1:
716 case M2_mpyu_acc_ll_s0:
717 case M2_mpyu_acc_ll_s1:
720 case M2_mpyu_nac_ll_s0:
721 case M2_mpyu_nac_ll_s1:
722 case M2_mpyud_acc_ll_s0:
723 case M2_mpyud_acc_ll_s1:
726 case M2_mpyud_nac_ll_s0:
727 case M2_mpyud_nac_ll_s1:
728 if (OpN == 1 || OpN == 2) {
729 Bits.set(Begin, Begin+16);
734 // Two register sources. Used bits: R1[0-15], R2[16-31].
736 case A2_addh_h16_sat_lh:
739 case A2_subh_h16_sat_lh:
740 case M2_mpy_acc_lh_s0:
741 case M2_mpy_acc_lh_s1:
742 case M2_mpy_acc_sat_lh_s0:
743 case M2_mpy_acc_sat_lh_s1:
746 case M2_mpy_nac_lh_s0:
747 case M2_mpy_nac_lh_s1:
748 case M2_mpy_nac_sat_lh_s0:
749 case M2_mpy_nac_sat_lh_s1:
750 case M2_mpy_rnd_lh_s0:
751 case M2_mpy_rnd_lh_s1:
752 case M2_mpy_sat_lh_s0:
753 case M2_mpy_sat_lh_s1:
754 case M2_mpy_sat_rnd_lh_s0:
755 case M2_mpy_sat_rnd_lh_s1:
756 case M2_mpyd_acc_lh_s0:
757 case M2_mpyd_acc_lh_s1:
760 case M2_mpyd_nac_lh_s0:
761 case M2_mpyd_nac_lh_s1:
762 case M2_mpyd_rnd_lh_s0:
763 case M2_mpyd_rnd_lh_s1:
764 case M2_mpyu_acc_lh_s0:
765 case M2_mpyu_acc_lh_s1:
768 case M2_mpyu_nac_lh_s0:
769 case M2_mpyu_nac_lh_s1:
770 case M2_mpyud_acc_lh_s0:
771 case M2_mpyud_acc_lh_s1:
774 case M2_mpyud_nac_lh_s0:
775 case M2_mpyud_nac_lh_s1:
776 // These four are actually LH.
778 case A2_addh_l16_sat_hl:
780 case A2_subh_l16_sat_hl:
782 Bits.set(Begin, Begin+16);
786 Bits.set(Begin+16, Begin+32);
791 // Two register sources, used bits: R1[16-31], R2[0-15].
793 case A2_addh_h16_sat_hl:
796 case A2_subh_h16_sat_hl:
797 case M2_mpy_acc_hl_s0:
798 case M2_mpy_acc_hl_s1:
799 case M2_mpy_acc_sat_hl_s0:
800 case M2_mpy_acc_sat_hl_s1:
803 case M2_mpy_nac_hl_s0:
804 case M2_mpy_nac_hl_s1:
805 case M2_mpy_nac_sat_hl_s0:
806 case M2_mpy_nac_sat_hl_s1:
807 case M2_mpy_rnd_hl_s0:
808 case M2_mpy_rnd_hl_s1:
809 case M2_mpy_sat_hl_s0:
810 case M2_mpy_sat_hl_s1:
811 case M2_mpy_sat_rnd_hl_s0:
812 case M2_mpy_sat_rnd_hl_s1:
813 case M2_mpyd_acc_hl_s0:
814 case M2_mpyd_acc_hl_s1:
817 case M2_mpyd_nac_hl_s0:
818 case M2_mpyd_nac_hl_s1:
819 case M2_mpyd_rnd_hl_s0:
820 case M2_mpyd_rnd_hl_s1:
821 case M2_mpyu_acc_hl_s0:
822 case M2_mpyu_acc_hl_s1:
825 case M2_mpyu_nac_hl_s0:
826 case M2_mpyu_nac_hl_s1:
827 case M2_mpyud_acc_hl_s0:
828 case M2_mpyud_acc_hl_s1:
831 case M2_mpyud_nac_hl_s0:
832 case M2_mpyud_nac_hl_s1:
834 Bits.set(Begin+16, Begin+32);
838 Bits.set(Begin, Begin+16);
843 // Two register sources, used bits: R1[16-31], R2[16-31].
845 case A2_addh_h16_sat_hh:
848 case A2_subh_h16_sat_hh:
849 case M2_mpy_acc_hh_s0:
850 case M2_mpy_acc_hh_s1:
851 case M2_mpy_acc_sat_hh_s0:
852 case M2_mpy_acc_sat_hh_s1:
855 case M2_mpy_nac_hh_s0:
856 case M2_mpy_nac_hh_s1:
857 case M2_mpy_nac_sat_hh_s0:
858 case M2_mpy_nac_sat_hh_s1:
859 case M2_mpy_rnd_hh_s0:
860 case M2_mpy_rnd_hh_s1:
861 case M2_mpy_sat_hh_s0:
862 case M2_mpy_sat_hh_s1:
863 case M2_mpy_sat_rnd_hh_s0:
864 case M2_mpy_sat_rnd_hh_s1:
865 case M2_mpyd_acc_hh_s0:
866 case M2_mpyd_acc_hh_s1:
869 case M2_mpyd_nac_hh_s0:
870 case M2_mpyd_nac_hh_s1:
871 case M2_mpyd_rnd_hh_s0:
872 case M2_mpyd_rnd_hh_s1:
873 case M2_mpyu_acc_hh_s0:
874 case M2_mpyu_acc_hh_s1:
877 case M2_mpyu_nac_hh_s0:
878 case M2_mpyu_nac_hh_s1:
879 case M2_mpyud_acc_hh_s0:
880 case M2_mpyud_acc_hh_s1:
883 case M2_mpyud_nac_hh_s0:
884 case M2_mpyud_nac_hh_s1:
885 if (OpN == 1 || OpN == 2) {
886 Bits.set(Begin+16, Begin+32);
895 // Calculate the register class that matches Reg:Sub. For example, if
896 // %1 is a double register, then %1:isub_hi would match the "int"
898 const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
899 const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
// Physical registers have no virtual register class here (early-exit
// return falls in a line not visible in this view).
900 if (!TargetRegisterInfo::isVirtualRegister(RR.Reg))
902 auto *RC = MRI.getRegClass(RR.Reg);
905 auto &HRI = static_cast<const HexagonRegisterInfo&>(
906 *MRI.getTargetRegisterInfo());
// Debug-only sanity check: the subregister index must be the lo or hi
// half of the given pair class.
908 auto VerifySR = [&HRI] (const TargetRegisterClass *RC, unsigned Sub) -> void {
910 assert(Sub == HRI.getHexagonSubRegIndex(*RC, Hexagon::ps_sub_lo) ||
911 Sub == HRI.getHexagonSubRegIndex(*RC, Hexagon::ps_sub_hi));
// A half of a scalar pair is an IntReg; a half of an HVX pair is an
// HVX vector register.
914 switch (RC->getID()) {
915 case Hexagon::DoubleRegsRegClassID:
916 VerifySR(RC, RR.Sub);
917 return &Hexagon::IntRegsRegClass;
918 case Hexagon::HvxWRRegClassID:
919 VerifySR(RC, RR.Sub);
920 return &Hexagon::HvxVRRegClass;
925 // Check if RD could be replaced with RS at any possible use of RD.
926 // For example a predicate register cannot be replaced with a integer
927 // register, but a 64-bit register with a subregister can be replaced
928 // with a 32-bit register.
// Implemented by comparing the "final" register classes of the two
// refs (subregisters resolved by getFinalVRegClass). Both refs must be
// virtual registers.
929 bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
930 const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
931 if (!TargetRegisterInfo::isVirtualRegister(RD.Reg) ||
932 !TargetRegisterInfo::isVirtualRegister(RS.Reg))
934 // Return false if one (or both) classes are nullptr.
935 auto *DRC = getFinalVRegClass(RD, MRI);
939 return DRC == getFinalVRegClass(RS, MRI);
// Return true if Reg has a tied use whose subregister index differs
// from NewSub — rewriting such a use would change the tied-operand
// constraint. Disabled entirely when -hexbit-keep-tied=false
// (PreserveTiedOps), in which case tied uses are ignored.
942 bool HexagonBitSimplify::hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
944 if (!PreserveTiedOps)
946 return llvm::any_of(MRI.use_operands(Reg),
947 [NewSub] (const MachineOperand &Op) -> bool {
948 return Op.getSubReg() != NewSub && Op.isTied();
// Target-aware dead code elimination driven by the dominator tree:
// blocks are processed children-first (see runOnNode) so that removing
// an instruction can expose dead defs in its dominators.
954 class DeadCodeElimination {
956 DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
957 : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
958 MDT(mdt), MRI(mf.getRegInfo()) {}
// Entry point: eliminate dead code in the whole function.
961 return runOnNode(MDT.getRootNode());
// True if register R has no "real" uses (debug uses do not count; see
// the definition of isDead below for the exact criteria).
965 bool isDead(unsigned R) const;
966 bool runOnNode(MachineDomTreeNode *N);
969 const HexagonInstrInfo &HII;
970 MachineDominatorTree &MDT;
971 MachineRegisterInfo &MRI;
974 } // end anonymous namespace
// Check whether register R is dead: walk all uses, skipping debug
// values; the remaining logic (in lines not visible here) presumably
// follows copy-like users via their destination DR — confirm in full
// source. The operand-0 subregister assertion guards that traversal.
976 bool DeadCodeElimination::isDead(unsigned R) const {
977 for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
978 MachineInstr *UseI = I->getParent();
979 if (UseI->isDebugValue())
982 assert(!UseI->getOperand(0).getSubReg());
983 unsigned DR = UseI->getOperand(0).getReg();
// Process dominator-tree node N: recurse into children first, then scan
// this node's block bottom-up. Instructions are snapshotted into a
// vector because erasing while iterating the block would invalidate the
// reverse iterators.
992 bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
993 bool Changed = false;
995 for (auto *DTN : children<MachineDomTreeNode*>(N))
996 Changed |= runOnNode(DTN);
998 MachineBasicBlock *B = N->getBlock();
999 std::vector<MachineInstr*> Instrs;
1000 for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
1001 Instrs.push_back(&*I);
1003 for (auto MI : Instrs) {
1004 unsigned Opc = MI->getOpcode();
1005 // Do not touch lifetime markers. This is why the target-independent DCE
1007 if (Opc == TargetOpcode::LIFETIME_START ||
1008 Opc == TargetOpcode::LIFETIME_END)
// Inline asm may have side effects invisible to the register analysis.
1011 if (MI->isInlineAsm())
1013 // Delete PHIs if possible.
// Non-PHI instructions must also be safe to move (no side effects,
// no volatile memory access) before they can be considered for removal.
1014 if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
1017 bool AllDead = true;
1018 SmallVector<unsigned,2> Regs;
1019 for (auto &Op : MI->operands()) {
1020 if (!Op.isReg() || !Op.isDef())
1022 unsigned R = Op.getReg();
// Any physical-register def, or a def with live uses, keeps MI alive.
1023 if (!TargetRegisterInfo::isVirtualRegister(R) || !isDead(R)) {
// For removed defs, turn any DBG_VALUE references into "undef" so the
// debug info stays consistent.
1033 for (unsigned i = 0, n = Regs.size(); i != n; ++i)
1034 MRI.markUsesInDebugValueAsUndef(Regs[i]);
1043 // Eliminate redundant instructions
1045 // This transformation will identify instructions where the output register
1046 // is the same as one of its input registers. This only works on instructions
1047 // that define a single register (unlike post-increment loads, for example).
1048 // The equality check is actually more detailed: the code calculates which
1049 // bits of the output are used, and only compares these bits with the input
1051 // If the output matches an input, the instruction is replaced with COPY.
1052 // The copies will be removed by another transformation.
// Runs top-down over the dominator tree (Transformation(true)).
1053 class RedundantInstrElimination : public Transformation {
1055 RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
1056 const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
1057 : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}
1059 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// Shift classification: if MI's operand OpN is the shifted input of a
// lossy shift, [LostB, LostE) receives the range of bit indices lost.
1062 bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
1063 unsigned &LostB, unsigned &LostE);
1064 bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
1065 unsigned &LostB, unsigned &LostE);
// Used-bit computation for a register / for one operand of one
// instruction, and the bit-level equality test between two refs.
1066 bool computeUsedBits(unsigned Reg, BitVector &Bits);
1067 bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
1069 bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);
1071 const HexagonInstrInfo &HII;
1072 const HexagonRegisterInfo &HRI;
1073 MachineRegisterInfo &MRI;
1077 } // end anonymous namespace
1079 // Check if the instruction is a lossy shift left, where the input being
1080 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
1081 // of bit indices that are lost.
1082 bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI,
1083 unsigned OpN, unsigned &LostB, unsigned &LostE) {
1084 using namespace Hexagon;
1086 unsigned Opc = MI.getOpcode();
// ImN is the operand index of the shift-amount immediate (asserted below);
// RegN/Width presumably identify the shifted register operand and its bit
// width — set in elided case bodies, TODO confirm against full source.
1087 unsigned ImN, RegN, Width;
// 64-bit (pair-register) ASL variants:
1094 case S2_asl_i_p_acc:
1095 case S2_asl_i_p_and:
1096 case S2_asl_i_p_nac:
1098 case S2_asl_i_p_xacc:
// 32-bit ASL variants (shift-and-op forms included):
1108 case S2_addasl_rrri:
1109 case S4_andi_asl_ri:
1111 case S4_addi_asl_ri:
1112 case S4_subi_asl_ri:
1113 case S2_asl_i_r_acc:
1114 case S2_asl_i_r_and:
1115 case S2_asl_i_r_nac:
1117 case S2_asl_i_r_sat:
1118 case S2_asl_i_r_xacc:
// S is the immediate shift count; a left shift by S loses the top S bits.
1130 assert(MI.getOperand(ImN).isImm());
1131 unsigned S = MI.getOperand(ImN).getImm();
1139 // Check if the instruction is a lossy shift right, where the input being
1140 // shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
1141 // of bit indices that are lost.
1142 bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
1143 unsigned OpN, unsigned &LostB, unsigned &LostE) {
1144 using namespace Hexagon;
1146 unsigned Opc = MI.getOpcode();
// 64-bit (pair-register) ASR/LSR variants:
1154 case S2_asr_i_p_acc:
1155 case S2_asr_i_p_and:
1156 case S2_asr_i_p_nac:
1158 case S2_lsr_i_p_acc:
1159 case S2_lsr_i_p_and:
1160 case S2_lsr_i_p_nac:
1162 case S2_lsr_i_p_xacc:
// 32-bit ASR/LSR variants (shift-and-op forms included):
1171 case S4_andi_lsr_ri:
1173 case S4_addi_lsr_ri:
1174 case S4_subi_lsr_ri:
1175 case S2_asr_i_r_acc:
1176 case S2_asr_i_r_and:
1177 case S2_asr_i_r_nac:
1179 case S2_lsr_i_r_acc:
1180 case S2_lsr_i_r_and:
1181 case S2_lsr_i_r_nac:
1183 case S2_lsr_i_r_xacc:
// S is the immediate shift count; a right shift by S loses the low S bits.
1195 assert(MI.getOperand(ImN).isImm());
1196 unsigned S = MI.getOperand(ImN).getImm();
1202 // Calculate the bit vector that corresponds to the used bits of register Reg.
1203 // The vector Bits has the same size, as the size of Reg in bits. If the cal-
1204 // culation fails (i.e. the used bits are unknown), it returns false. Other-
1205 // wise, it returns true and sets the corresponding bits in Bits.
1206 bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
1207 BitVector Used(Bits.size());
1208 RegisterSet Visited;
// Worklist of registers whose uses must be examined. PHIs/copies forward
// the value, so their defs are queued instead of being treated as "uses".
1209 std::vector<unsigned> Pending;
1210 Pending.push_back(Reg);
// Index-based loop: Pending grows while iterating, so iterators would be
// invalidated by push_back below.
1212 for (unsigned i = 0; i < Pending.size(); ++i) {
1213 unsigned R = Pending[i];
1217 for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
1218 BitTracker::RegisterRef UR = *I;
// B/W: bit offset and width of the (possibly sub-) register within the
// full register; bail out if the subregister mask cannot be computed.
1220 if (!HBS::getSubregMask(UR, B, W, MRI))
1222 MachineInstr &UseI = *I->getParent();
1223 if (UseI.isPHI() || UseI.isCopy()) {
1224 unsigned DefR = UseI.getOperand(0).getReg();
1225 if (!TargetRegisterInfo::isVirtualRegister(DefR))
1227 Pending.push_back(DefR);
// Non-copy use: let the per-instruction overload record the used bits.
1229 if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
1238 // Calculate the bits used by instruction MI in a register in operand OpN.
1239 // Return true/false if the calculation succeeds/fails. If is succeeds, set
1240 // used bits in Bits. This function does not reset any bits in Bits, so
1241 // subsequent calls over different instructions will result in the union
1242 // of the used bits in all these instructions.
1243 // The register in question may be used with a sub-register, whereas Bits
1244 // holds the bits for the entire register. To keep track of that, the
1245 // argument Begin indicates where in Bits is the lowest-significant bit
1246 // of the register used in operand OpN. For example, in instruction:
1247 // %1 = S2_lsr_i_r %2:isub_hi, 10
1248 // the operand 1 is a 32-bit register, which happens to be a subregister
1249 // of the 64-bit register %2, and that subregister starts at position 32.
1250 // In this case Begin=32, since Bits[32] would be the lowest-significant bit
1252 bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
1253 unsigned OpN, BitVector &Bits, uint16_t Begin) {
1254 unsigned Opc = MI.getOpcode();
1255 BitVector T(Bits.size());
1256 bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
1257 // Even if we don't have bits yet, we could still provide some information
1258 // if the instruction is a lossy shift: the lost bits will be marked as
// not used (i.e. cleared in T below).
1261 if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
1262 assert(MI.getOperand(OpN).isReg());
1263 BitTracker::RegisterRef RR = MI.getOperand(OpN);
1264 const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
1265 uint16_t Width = HRI.getRegSizeInBits(*RC);
// Conservatively mark the whole operand as used, then clear the bits the
// shift provably discards ([LB, LE) relative to the operand).
1268 T.set(Begin, Begin+Width);
1269 assert(LB <= LE && LB < Width && LE <= Width);
1270 T.reset(Begin+LB, Begin+LE);
1278 // Calculates the used bits in RD ("defined register"), and checks if these
1279 // bits in RS ("used register") and RD are identical.
1280 bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD,
1281 BitTracker::RegisterRef RS) {
1282 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
1283 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
// DB/DW and SB/SW: bit offset and width of RD's and RS's (sub)registers
// within their full registers; fail if either mask cannot be computed.
1286 if (!HBS::getSubregMask(RD, DB, DW, MRI))
1289 if (!HBS::getSubregMask(RS, SB, SW, MRI))
1294 BitVector Used(DC.width());
1295 if (!computeUsedBits(RD.Reg, Used))
// Compare only the bits of RD that are actually observed by its users.
1298 for (unsigned i = 0; i != DW; ++i)
1299 if (Used[i+DB] && DC[DB+i] != SC[SB+i])
// Scan the block for instructions whose (used bits of the) output equal one
// of their register inputs, and replace each such instruction with a COPY.
1304 bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
1305 const RegisterSet&) {
// Skip blocks the bit tracker never reached: no cell info is available.
1306 if (!BT.reached(&B))
1308 bool Changed = false;
1310 for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) {
1311 NextI = std::next(I);
1312 MachineInstr *MI = &*I;
// Existing copies, PHIs, side-effecting instructions and inline asm are
// either already optimal or unsafe to replace.
1314 if (MI->getOpcode() == TargetOpcode::COPY)
1316 if (MI->isPHI() || MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
// Only single-def instructions are handled (see file comment above).
1318 unsigned NumD = MI->getDesc().getNumDefs()
1322 BitTracker::RegisterRef RD = MI->getOperand(0);
1323 if (!BT.has(RD.Reg))
1325 const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
1326 auto At = MachineBasicBlock::iterator(MI);
1328 // Find a source operand that is equal to the result.
1329 for (auto &Op : MI->uses()) {
1332 BitTracker::RegisterRef RS = Op;
1333 if (!BT.has(RS.Reg))
1335 if (!HBS::isTransparentCopy(RD, RS, MRI))
// BN/BW: bit offset/width of RS's (sub)register within its register.
1339 if (!HBS::getSubregMask(RS, BN, BW, MRI))
// Accept either equality of the *used* bits, or full cell equality.
1342 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
1343 if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW))
1346 // If found, replace the instruction with a COPY.
1347 const DebugLoc &DL = MI->getDebugLoc();
1348 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
1349 unsigned NewR = MRI.createVirtualRegister(FRC);
1350 MachineInstr *CopyI =
1351 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
1352 .addReg(RS.Reg, 0, RS.Sub);
1353 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
1354 // This pass can create copies between registers that don't have the
1355 // exact same values. Updating the tracker has to involve updating
1356 // all dependent cells. Example:
1357 // %1 = inst %2 ; %1 != %2, but used bits are equal
1359 // %3 = copy %2 ; <- inserted
1360 // ... = %3 ; <- replaced from %2
1361 // Indirectly, we can create a "copy" between %1 and %2 even
1362 // though their exact values do not match.
1374 // Recognize instructions that produce constant values known at compile-time.
1375 // Replace them with register definitions that load these constants directly.
1376 class ConstGeneration : public Transformation {
1378 ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
1379 MachineRegisterInfo &mri)
1380 : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
1382 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// True if MI is already a transfer-immediate form (nothing to gain).
1383 static bool isTfrConst(const MachineInstr &MI);
// Emit the cheapest transfer-immediate for value C of class RC at At;
// returns the newly created virtual register.
1386 unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
1387 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);
1389 const HexagonInstrInfo &HII;
1390 MachineRegisterInfo &MRI;
1394 } // end anonymous namespace
// Return true for opcodes that already materialize a compile-time constant;
// ConstGeneration should not re-process these.
1396 bool ConstGeneration::isTfrConst(const MachineInstr &MI) {
1397 unsigned Opc = MI.getOpcode();
1399 case Hexagon::A2_combineii:
1400 case Hexagon::A4_combineii:
1401 case Hexagon::A2_tfrsi:
1402 case Hexagon::A2_tfrpi:
1403 case Hexagon::PS_true:
1404 case Hexagon::PS_false:
1405 case Hexagon::CONST32:
1406 case Hexagon::CONST64:
1412 // Generate a transfer-immediate instruction that is appropriate for the
1413 // register class and the actual value being transferred.
1414 unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
1415 MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
1416 unsigned Reg = MRI.createVirtualRegister(RC);
// 32-bit register: a single tfrsi always suffices.
1417 if (RC == &Hexagon::IntRegsRegClass) {
1418 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
1419 .addImm(int32_t(C));
// 64-bit register: try tfrpi, then combineii of the two halves, and fall
// back to CONST64 (which may need a constant extender).
1423 if (RC == &Hexagon::DoubleRegsRegClass) {
1425 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
1430 unsigned Lo = Lo_32(C), Hi = Hi_32(C);
// A2_combineii requires a signed 8-bit low half; A4_combineii requires a
// signed 8-bit high half.
1431 if (isInt<8>(Lo) || isInt<8>(Hi)) {
1432 unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
1433 : Hexagon::A4_combineii;
1434 BuildMI(B, At, DL, HII.get(Opc), Reg)
1435 .addImm(int32_t(Hi))
1436 .addImm(int32_t(Lo));
1440 BuildMI(B, At, DL, HII.get(Hexagon::CONST64), Reg)
// Predicate register: only all-zeros / all-ones patterns are representable.
1445 if (RC == &Hexagon::PredRegsRegClass) {
1448 Opc = Hexagon::PS_false;
1449 else if ((C & 0xFF) == 0xFF)
1450 Opc = Hexagon::PS_true;
1453 BuildMI(B, At, DL, HII.get(Opc), Reg);
// Replace each single-def instruction whose result is a known constant with
// a direct transfer-immediate, and rewrite all uses to the new register.
1460 bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
1461 if (!BT.reached(&B))
1463 bool Changed = false;
1466 for (auto I = B.begin(), E = B.end(); I != E; ++I) {
1470 HBS::getInstrDefs(*I, Defs);
// Only handle instructions defining exactly one virtual register.
1471 if (Defs.count() != 1)
1473 unsigned DR = Defs.find_first();
1474 if (!TargetRegisterInfo::isVirtualRegister(DR))
1477 const BitTracker::RegisterCell &DRC = BT.lookup(DR);
// U receives the constant bits if the whole cell is a known constant.
1478 if (HBS::getConst(DRC, 0, DRC.width(), U)) {
1480 DebugLoc DL = I->getDebugLoc();
// New defs cannot be placed among PHIs; insert after them if needed.
1481 auto At = I->isPHI() ? B.getFirstNonPHI() : I;
1482 unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
1484 HBS::replaceReg(DR, ImmReg, MRI);
// Transfer the known cell to the new register so later passes see it.
1485 BT.put(ImmReg, DRC);
1495 // Identify pairs of available registers which hold identical values.
1496 // In such cases, only one of them needs to be calculated, the other one
1497 // will be defined as a copy of the first.
1498 class CopyGeneration : public Transformation {
1500 CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
1501 const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
1502 : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}
1504 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// Find a register in AVs whose value equals Inp; result returned in Out.
1507 bool findMatch(const BitTracker::RegisterRef &Inp,
1508 BitTracker::RegisterRef &Out, const RegisterSet &AVs);
1510 const HexagonInstrInfo &HII;
1511 const HexagonRegisterInfo &HRI;
1512 MachineRegisterInfo &MRI;
// Registers already replaced by a copy; never use them as match sources.
1514 RegisterSet Forbidden;
1517 // Eliminate register copies RD = RS, by replacing the uses of RD with RS.
1519 class CopyPropagation : public Transformation {
1521 CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
1522 : Transformation(false), HRI(hri), MRI(mri) {}
1524 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// True for opcodes that behave like a register copy (COPY, REG_SEQUENCE,
// combine/tfr forms); NOTE(review): the effect of NoConv on the selection
// is in an elided line — confirm against the full source.
1526 static bool isCopyReg(unsigned Opc, bool NoConv);
1529 bool propagateRegCopy(MachineInstr &MI);
1531 const HexagonRegisterInfo &HRI;
1532 MachineRegisterInfo &MRI;
1535 } // end anonymous namespace
1537 /// Check if there is a register in AVs that is identical to Inp. If so,
1538 /// set Out to the found register. The output may be a pair Reg:Sub.
1539 bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp,
1540 BitTracker::RegisterRef &Out, const RegisterSet &AVs) {
1541 if (!BT.has(Inp.Reg))
1543 const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg);
1544 auto *FRC = HBS::getFinalVRegClass(Inp, MRI);
// B/W: bit offset and width of Inp's (sub)register within its register.
1546 if (!HBS::getSubregMask(Inp, B, W, MRI))
// First pass: look for an exact whole-register match of the same class.
1549 for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) {
1550 if (!BT.has(R) || Forbidden[R])
1552 const BitTracker::RegisterCell &RC = BT.lookup(R);
1553 unsigned RW = RC.width();
1555 if (FRC != MRI.getRegClass(R))
1557 if (!HBS::isTransparentCopy(R, Inp, MRI))
1559 if (!HBS::isEqual(InpRC, B, RC, 0, W))
1565 // Check if there is a super-register, whose part (with a subregister)
1566 // is equal to the input.
1567 // Only do double registers for now.
1570 if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass)
// Match against the low or high half of the double register.
1573 if (HBS::isEqual(InpRC, B, RC, 0, W))
1574 Out.Sub = Hexagon::isub_lo;
1575 else if (HBS::isEqual(InpRC, B, RC, W, W))
1576 Out.Sub = Hexagon::isub_hi;
1580 if (HBS::isTransparentCopy(Out, Inp, MRI))
// For each instruction's defs, try to find an already-available register
// with the same value and replace the def with a COPY (or REG_SEQUENCE of
// two matched halves for double/vector-pair classes).
1586 bool CopyGeneration::processBlock(MachineBasicBlock &B,
1587 const RegisterSet &AVs) {
1588 if (!BT.reached(&B))
// AVB: registers available at the current instruction; grows as we go.
1590 RegisterSet AVB(AVs);
1591 bool Changed = false;
1594 for (auto I = B.begin(), E = B.end(), NextI = I; I != E;
1595 ++I, AVB.insert(Defs)) {
1596 NextI = std::next(I);
1598 HBS::getInstrDefs(*I, Defs);
// Skip instructions that are already copies or constant transfers.
1600 unsigned Opc = I->getOpcode();
1601 if (CopyPropagation::isCopyReg(Opc, false) ||
1602 ConstGeneration::isTfrConst(*I))
1605 DebugLoc DL = I->getDebugLoc();
1606 auto At = I->isPHI() ? B.getFirstNonPHI() : I;
1608 for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) {
1609 BitTracker::RegisterRef MR;
1610 auto *FRC = HBS::getFinalVRegClass(R, MRI);
// Whole-register match: emit a single COPY and retire R.
1612 if (findMatch(R, MR, AVB)) {
1613 unsigned NewR = MRI.createVirtualRegister(FRC);
1614 BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
1615 .addReg(MR.Reg, 0, MR.Sub);
1616 BT.put(BitTracker::RegisterRef(NewR), BT.get(MR));
1617 HBS::replaceReg(R, NewR, MRI);
// Forbid R as a future match source: its uses now go through NewR.
1618 Forbidden.insert(R);
1622 if (FRC == &Hexagon::DoubleRegsRegClass ||
1623 FRC == &Hexagon::HvxWRRegClass) {
1624 // Try to generate REG_SEQUENCE.
1625 unsigned SubLo = HRI.getHexagonSubRegIndex(*FRC, Hexagon::ps_sub_lo);
1626 unsigned SubHi = HRI.getHexagonSubRegIndex(*FRC, Hexagon::ps_sub_hi);
1627 BitTracker::RegisterRef TL = { R, SubLo };
1628 BitTracker::RegisterRef TH = { R, SubHi };
1629 BitTracker::RegisterRef ML, MH;
// Both halves must match independently for the REG_SEQUENCE to be valid.
1630 if (findMatch(TL, ML, AVB) && findMatch(TH, MH, AVB)) {
1631 auto *FRC = HBS::getFinalVRegClass(R, MRI);
1632 unsigned NewR = MRI.createVirtualRegister(FRC);
1633 BuildMI(B, At, DL, HII.get(TargetOpcode::REG_SEQUENCE), NewR)
1634 .addReg(ML.Reg, 0, ML.Sub)
1636 .addReg(MH.Reg, 0, MH.Sub)
1638 BT.put(BitTracker::RegisterRef(NewR), BT.get(R));
1639 HBS::replaceReg(R, NewR, MRI);
1640 Forbidden.insert(R);
// Classify opcodes that move a value between registers unchanged; the
// combine/tfr forms listed last are copies only up to sub-register layout.
1649 bool CopyPropagation::isCopyReg(unsigned Opc, bool NoConv) {
1651 case TargetOpcode::COPY:
1652 case TargetOpcode::REG_SEQUENCE:
1653 case Hexagon::A4_combineir:
1654 case Hexagon::A4_combineri:
1656 case Hexagon::A2_tfr:
1657 case Hexagon::A2_tfrp:
1658 case Hexagon::A2_combinew:
1659 case Hexagon::V6_vcombine:
// Replace the uses of the register defined by the copy-like instruction MI
// with its source(s), dispatching on the specific copy form.
1667 bool CopyPropagation::propagateRegCopy(MachineInstr &MI) {
1668 bool Changed = false;
1669 unsigned Opc = MI.getOpcode();
1670 BitTracker::RegisterRef RD = MI.getOperand(0);
1671 assert(MI.getOperand(0).getSubReg() == 0);
// Plain full-register copies.
1674 case TargetOpcode::COPY:
1675 case Hexagon::A2_tfr:
1676 case Hexagon::A2_tfrp: {
1677 BitTracker::RegisterRef RS = MI.getOperand(1);
1678 if (!HBS::isTransparentCopy(RD, RS, MRI))
1681 Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI);
1683 Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI);
// REG_SEQUENCE: propagate each half separately.
1686 case TargetOpcode::REG_SEQUENCE: {
1687 BitTracker::RegisterRef SL, SH;
1688 if (HBS::parseRegSequence(MI, SL, SH, MRI)) {
1689 const TargetRegisterClass &RC = *MRI.getRegClass(RD.Reg);
1690 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
1691 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
1692 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, SL.Reg, SL.Sub, MRI);
1693 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, SH.Reg, SH.Sub, MRI);
// combinew/vcombine: operand 1 is the high half, operand 2 the low half.
1697 case Hexagon::A2_combinew:
1698 case Hexagon::V6_vcombine: {
1699 const TargetRegisterClass &RC = *MRI.getRegClass(RD.Reg);
1700 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
1701 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
1702 BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2);
1703 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, RL.Reg, RL.Sub, MRI);
1704 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, RH.Reg, RH.Sub, MRI);
// combineir/combineri: only one operand is a register; propagate that half.
1707 case Hexagon::A4_combineir:
1708 case Hexagon::A4_combineri: {
1709 unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1;
1710 unsigned Sub = (Opc == Hexagon::A4_combineir) ? Hexagon::isub_lo
1712 BitTracker::RegisterRef RS = MI.getOperand(SrcX);
1713 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI);
// Propagate all copy-like instructions in B. The block is walked in reverse
// through a snapshot so that replacements do not disturb the iteration.
1720 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) {
1721 std::vector<MachineInstr*> Instrs;
1722 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I)
1723 Instrs.push_back(&*I);
1725 bool Changed = false;
1726 for (auto I : Instrs) {
1727 unsigned Opc = I->getOpcode();
1728 if (!CopyPropagation::isCopyReg(Opc, true))
1730 Changed |= propagateRegCopy(*I);
1738 // Recognize patterns that can be simplified and replace them with the
1740 // minimum instruction sequence. This is by no means a complete catalog of
1740 // simplifications; only a set of known-profitable patterns is handled.
1741 class BitSimplification : public Transformation {
1743 BitSimplification(BitTracker &bt, const MachineDominatorTree &mdt,
1744 const HexagonInstrInfo &hii, const HexagonRegisterInfo &hri,
1745 MachineRegisterInfo &mri, MachineFunction &mf)
1746 : Transformation(true), MDT(mdt), HII(hii), HRI(hri), MRI(mri),
1749 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
// A register reference plus which halfword of it is meant.
1752 struct RegHalf : public BitTracker::RegisterRef {
1753 bool Low; // Low/High halfword.
1756 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC,
1757 unsigned B, RegHalf &RH);
// Verify R is acceptable as operand OpNum of an instruction with opcode Opc.
1758 bool validateReg(BitTracker::RegisterRef R, unsigned Opc, unsigned OpNum);
1760 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC,
1761 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt);
1762 unsigned getCombineOpcode(bool HLow, bool LLow);
// The gen*/simplify* helpers each recognize one pattern and rewrite MI.
1764 bool genStoreUpperHalf(MachineInstr *MI);
1765 bool genStoreImmediate(MachineInstr *MI);
1766 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD,
1767 const BitTracker::RegisterCell &RC);
1768 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD,
1769 const BitTracker::RegisterCell &RC);
1770 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD,
1771 const BitTracker::RegisterCell &RC);
1772 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD,
1773 const BitTracker::RegisterCell &RC);
1774 bool genBitSplit(MachineInstr *MI, BitTracker::RegisterRef RD,
1775 const BitTracker::RegisterCell &RC, const RegisterSet &AVs);
1776 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD,
1777 const BitTracker::RegisterCell &RC);
1778 bool simplifyExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD,
1779 const BitTracker::RegisterCell &RC, const RegisterSet &AVs);
1780 bool simplifyRCmp0(MachineInstr *MI, BitTracker::RegisterRef RD);
1782 // Cache of created instructions to avoid creating duplicates.
1783 // XXX Currently only used by genBitSplit.
1784 std::vector<MachineInstr*> NewMIs;
1786 const MachineDominatorTree &MDT;
1787 const HexagonInstrInfo &HII;
1788 const HexagonRegisterInfo &HRI;
1789 MachineRegisterInfo &MRI;
1790 MachineFunction &MF;
1794 } // end anonymous namespace
1796 // Check if the bits [B..B+16) in register cell RC form a valid halfword,
1797 // i.e. [0..16), [16..32), etc. of some register. If so, return true and
1798 // set the information about the found register in RH.
1799 bool BitSimplification::matchHalf(unsigned SelfR,
1800 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) {
1801 // XXX This could be searching in the set of available registers, in case
1802 // the match is not exact.
1804 // Match 16-bit chunks, where the RC[B..B+15] references exactly one
1805 // register and all the bits B..B+15 match between RC and the register.
1806 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... },
1807 // and RC = { [0]:0 [1-15]:v1[1-15]... }.
// Skip over known-constant bits to find the first referencing bit.
1810 while (I < B+16 && RC[I].num())
1815 unsigned Reg = RC[I].RefI.Reg;
1816 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B.
// Pos: position in Reg that corresponds to bit B of RC.
1819 unsigned Pos = P - (I-B);
1821 if (Reg == 0 || Reg == SelfR) // Don't match "self".
1823 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1828 const BitTracker::RegisterCell &SC = BT.lookup(Reg);
// The whole halfword must fit inside Reg.
1829 if (Pos+16 > SC.width())
// Verify each of the 16 bits: references must point at Reg/Pos+i, and
// constant bits must equal the corresponding bit of Reg's own cell.
1832 for (unsigned i = 0; i < 16; ++i) {
1833 const BitTracker::BitValue &RV = RC[i+B];
1834 if (RV.Type == BitTracker::BitValue::Ref) {
1835 if (RV.RefI.Reg != Reg)
1837 if (RV.RefI.Pos != i+Pos)
1841 if (RC[i+B] != SC[i+Pos])
// Select the subregister/half based on Pos (cases elided in this view).
1848 Sub = Hexagon::isub_lo;
1852 Sub = Hexagon::isub_lo;
1856 Sub = Hexagon::isub_hi;
1860 Sub = Hexagon::isub_hi;
1870 // If the subregister is not valid with the register, set it to 0.
1871 if (!HBS::getFinalVRegClass(RH, MRI))
// Check whether register R is a legal choice for operand OpNum of an
// instruction with opcode Opc, by comparing register classes.
1877 bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc,
1879 auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI, MF);
1880 auto *RRC = HBS::getFinalVRegClass(R, MRI);
1881 return OpRC->hasSubClassEq(RRC);
1884 // Check if RC matches the pattern of a S2_packhl. If so, return true and
1885 // set the inputs Rs and Rt.
1886 bool BitSimplification::matchPackhl(unsigned SelfR,
1887 const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs,
1888 BitTracker::RegisterRef &Rt) {
1889 RegHalf L1, H1, L2, H2;
// The 64-bit result must decompose into four matchable halfwords.
1891 if (!matchHalf(SelfR, RC, 0, L2) || !matchHalf(SelfR, RC, 16, L1))
1893 if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1))
1896 // Rs = H1.L1, Rt = H2.L2
// Each pair must come from the same register, with the high half taken
// from the register's high halfword and the low half from its low one.
1897 if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low)
1899 if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low)
// Map (high-half source, low-half source) halfword selections to the
// matching A2_combine_{hh,hl,lh,ll} opcode.
1907 unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) {
1908 return HLow ? LLow ? Hexagon::A2_combine_ll
1909 : Hexagon::A2_combine_lh
1910 : LLow ? Hexagon::A2_combine_hl
1911 : Hexagon::A2_combine_hh;
1914 // If MI stores the upper halfword of a register (potentially obtained via
1915 // shifts or extracts), replace it with a storerf instruction. This could
1916 // cause the "extraction" code to become dead.
1917 bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
1918 unsigned Opc = MI->getOpcode();
1919 if (Opc != Hexagon::S2_storerh_io)
// Operand 2 of S2_storerh_io is the stored value.
1922 MachineOperand &ValOp = MI->getOperand(2);
1923 BitTracker::RegisterRef RS = ValOp;
1924 if (!BT.has(RS.Reg))
1926 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
// The low 16 stored bits must be some register's halfword.
1928 if (!matchHalf(0, RC, 0, H))
// Rewrite in place: switch to storerf and store H's register half directly.
1932 MI->setDesc(HII.get(Hexagon::S2_storerf_io));
1933 ValOp.setReg(H.Reg);
1934 ValOp.setSubReg(H.Sub);
1938 // If MI stores a value known at compile-time, and the value is within a range
1939 // that avoids using constant-extenders, replace it with a store-immediate.
1940 bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
1941 unsigned Opc = MI->getOpcode();
// Only byte/half/word register stores are candidates (Align set per case).
1944 case Hexagon::S2_storeri_io:
1947 case Hexagon::S2_storerh_io:
1950 case Hexagon::S2_storerb_io:
1956 // Avoid stores to frame-indices (due to an unknown offset).
1957 if (!MI->getOperand(0).isReg())
1959 MachineOperand &OffOp = MI->getOperand(1);
1963 int64_t Off = OffOp.getImm();
1964 // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x).
// Reject offsets outside u6 scaled by the access size, or misaligned ones.
1965 if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1)))
1968 BitTracker::RegisterRef RS = MI->getOperand(2);
1969 if (!BT.has(RS.Reg))
1971 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
// U receives the constant stored value if the whole cell is known.
1973 if (!HBS::getConst(RC, 0, RC.width(), U))
1976 // Only consider 8-bit values to avoid constant-extenders.
1979 case Hexagon::S2_storerb_io:
1982 case Hexagon::S2_storerh_io:
1985 case Hexagon::S2_storeri_io:
// Replace the register value operand with the immediate V.
1992 MI->RemoveOperand(2);
1994 case Hexagon::S2_storerb_io:
1995 MI->setDesc(HII.get(Hexagon::S4_storeirb_io));
1997 case Hexagon::S2_storerh_io:
1998 MI->setDesc(HII.get(Hexagon::S4_storeirh_io));
2000 case Hexagon::S2_storeri_io:
2001 MI->setDesc(HII.get(Hexagon::S4_storeiri_io));
2004 MI->addOperand(MachineOperand::CreateImm(V));
2008 // If MI is equivalent to S2_packhl, generate the S2_packhl. MI could be the
2009 // last instruction in a sequence that results in something equivalent to
2010 // the pack-halfwords. The intent is to cause the entire sequence to become
2012 bool BitSimplification::genPackhl(MachineInstr *MI,
2013 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2014 unsigned Opc = MI->getOpcode();
// Already a packhl: nothing to do.
2015 if (Opc == Hexagon::S2_packhl)
2017 BitTracker::RegisterRef Rs, Rt;
2018 if (!matchPackhl(RD.Reg, RC, Rs, Rt))
// Both inputs must be legal operands for S2_packhl.
2020 if (!validateReg(Rs, Hexagon::S2_packhl, 1) ||
2021 !validateReg(Rt, Hexagon::S2_packhl, 2))
2024 MachineBasicBlock &B = *MI->getParent();
2025 unsigned NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
2026 DebugLoc DL = MI->getDebugLoc();
// New defs cannot be placed among PHIs; insert after them if needed.
2027 auto At = MI->isPHI() ? B.getFirstNonPHI()
2028 : MachineBasicBlock::iterator(MI);
2029 BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR)
2030 .addReg(Rs.Reg, 0, Rs.Sub)
2031 .addReg(Rt.Reg, 0, Rt.Sub);
2032 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
// Record the cell for the new register so later queries see it.
2033 BT.put(BitTracker::RegisterRef(NewR), RC);
2037 // If MI produces halfword of the input in the low half of the output,
2038 // replace it with zero-extend or extractu.
2039 bool BitSimplification::genExtractHalf(MachineInstr *MI,
2040 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2042 // Check for halfword in low 16 bits, zeros elsewhere.
2043 if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16))
2046 unsigned Opc = MI->getOpcode();
2047 MachineBasicBlock &B = *MI->getParent();
2048 DebugLoc DL = MI->getDebugLoc();
2050 // Prefer zxth, since zxth can go in any slot, while extractu only in
// certain slots.
2053 auto At = MI->isPHI() ? B.getFirstNonPHI()
2054 : MachineBasicBlock::iterator(MI);
// Source is a low halfword: zero-extend it (unless MI already is zxth).
2055 if (L.Low && Opc != Hexagon::A2_zxth) {
2056 if (validateReg(L, Hexagon::A2_zxth, 1)) {
2057 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2058 BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR)
2059 .addReg(L.Reg, 0, L.Sub);
// Source is a high halfword: shift it down (unless MI already is lsr).
2061 } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) {
2062 if (validateReg(L, Hexagon::S2_lsr_i_r, 1)) {
2063 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2064 BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR)
2065 .addReg(L.Reg, 0, L.Sub)
2071 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2072 BT.put(BitTracker::RegisterRef(NewR), RC);
2076 // If MI is equivalent to a combine(.L/.H, .L/.H), replace it with the
2078 bool BitSimplification::genCombineHalf(MachineInstr *MI,
2079 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2081 // Check for combine h/l
2082 if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H))
2084 // Do nothing if this is just a reg copy.
2085 if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low)
2088 unsigned Opc = MI->getOpcode();
2089 unsigned COpc = getCombineOpcode(H.Low, L.Low);
2092 if (!validateReg(H, COpc, 1) || !validateReg(L, COpc, 2))
2095 MachineBasicBlock &B = *MI->getParent();
2096 DebugLoc DL = MI->getDebugLoc();
2097 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
// New defs cannot be placed among PHIs; insert after them if needed.
2098 auto At = MI->isPHI() ? B.getFirstNonPHI()
2099 : MachineBasicBlock::iterator(MI);
2100 BuildMI(B, At, DL, HII.get(COpc), NewR)
2101 .addReg(H.Reg, 0, H.Sub)
2102 .addReg(L.Reg, 0, L.Sub);
2103 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
// Record the cell for the new register so later queries see it.
2104 BT.put(BitTracker::RegisterRef(NewR), RC);
2108 // If MI resets high bits of a register and keeps the lower ones, replace it
2109 // with zero-extend byte/half, and-immediate, or extractu, as appropriate.
2110 bool BitSimplification::genExtractLow(MachineInstr *MI,
2111 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2112 unsigned Opc = MI->getOpcode();
// Already in one of the target forms: nothing to gain.
2114 case Hexagon::A2_zxtb:
2115 case Hexagon::A2_zxth:
2116 case Hexagon::S2_extractu:
// An andir whose mask is already the canonical low-bit mask is also final.
2119 if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) {
2120 int32_t Imm = MI->getOperand(2).getImm();
2125 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
// W: number of significant low bits (strip known-zero high bits).
2127 unsigned W = RC.width();
2128 while (W > 0 && RC[W-1].is(0))
2130 if (W == 0 || W == RC.width())
// Choose the cheapest opcode for the resulting width.
2132 unsigned NewOpc = (W == 8) ? Hexagon::A2_zxtb
2133 : (W == 16) ? Hexagon::A2_zxth
2134 : (W < 10) ? Hexagon::A2_andir
2135 : Hexagon::S2_extractu;
2136 MachineBasicBlock &B = *MI->getParent();
2137 DebugLoc DL = MI->getDebugLoc();
// Look for a source operand whose low W bits equal the result's low bits.
2139 for (auto &Op : MI->uses()) {
2142 BitTracker::RegisterRef RS = Op;
2143 if (!BT.has(RS.Reg))
2145 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
2147 if (!HBS::getSubregMask(RS, BN, BW, MRI))
2149 if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W))
2151 if (!validateReg(RS, NewOpc, 1))
2154 unsigned NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2155 auto At = MI->isPHI() ? B.getFirstNonPHI()
2156 : MachineBasicBlock::iterator(MI);
2157 auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR)
2158 .addReg(RS.Reg, 0, RS.Sub);
// andir takes the bit mask, extractu takes width and offset.
2159 if (NewOpc == Hexagon::A2_andir)
2160 MIB.addImm((1 << W) - 1);
2161 else if (NewOpc == Hexagon::S2_extractu)
2162 MIB.addImm(W).addImm(0);
2163 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2164 BT.put(BitTracker::RegisterRef(NewR), RC);
// Recognize a pair of registers that together form a bitfield split of a
// single source register, and replace both with one A4_bitspliti.
2170 bool BitSimplification::genBitSplit(MachineInstr *MI,
2171 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC,
2172 const RegisterSet &AVs) {
// Debugging aid: cap the number of generated bitsplits via -hexbit option.
2175 if (MaxBitSplit.getNumOccurrences()) {
2176 if (CountBitSplit >= MaxBitSplit)
2180 unsigned Opc = MI->getOpcode();
// Do not re-process existing bitsplit instructions.
2182 case Hexagon::A4_bitsplit:
2183 case Hexagon::A4_bitspliti:
2187 unsigned W = RC.width();
// Count-leading-zeros over a register cell.
2191 auto ctlz = [] (const BitTracker::RegisterCell &C) -> unsigned {
2192 unsigned Z = C.width();
2193 while (Z > 0 && C[Z-1].is(0))
2195 return C.width() - Z;
2198 // Count the number of leading zeros in the target RC.
2199 unsigned Z = ctlz(RC);
2200 if (Z == 0 || Z == W)
2203 // A simplistic analysis: assume the source register (the one being split)
2204 // is fully unknown, and that all its bits are self-references.
2205 const BitTracker::BitValue &B0 = RC[0];
2206 if (B0.Type != BitTracker::BitValue::Ref)
2209 unsigned SrcR = B0.RefI.Reg;
2211 unsigned Pos = B0.RefI.Pos;
2213 // All the non-zero bits should be consecutive bits from the same register.
2214 for (unsigned i = 1; i < W-Z; ++i) {
2215 const BitTracker::BitValue &V = RC[i];
2216 if (V.Type != BitTracker::BitValue::Ref)
2218 if (V.RefI.Reg != SrcR || V.RefI.Pos != Pos+i)
2222 // Now, find the other bitfield among AVs.
2223 for (unsigned S = AVs.find_first(); S; S = AVs.find_next(S)) {
2224 // The number of leading zeros here should be the number of trailing
// non-zero bits in RD's cell.
2228 const BitTracker::RegisterCell &SC = BT.lookup(S);
2229 if (SC.width() != W || ctlz(SC) != W-Z)
2231 // The Z lower bits should now match SrcR.
2232 const BitTracker::BitValue &S0 = SC[0];
2233 if (S0.Type != BitTracker::BitValue::Ref || S0.RefI.Reg != SrcR)
2235 unsigned P = S0.RefI.Pos;
// The two bitfields must be adjacent in SrcR (one directly above the other).
2237 if (Pos <= P && (Pos + W-Z) != P)
2239 if (P < Pos && (P + Z) != Pos)
2241 // The starting bitfield position must be at a subregister boundary.
2242 if (std::min(P, Pos) != 0 && std::min(P, Pos) != 32)
// Verify S's low Z bits are the consecutive bits SrcR[P..P+Z).
2246 for (I = 1; I < Z; ++I) {
2247 const BitTracker::BitValue &V = SC[I];
2248 if (V.Type != BitTracker::BitValue::Ref)
2250 if (V.RefI.Reg != SrcR || V.RefI.Pos != P+I)
2256 // Generate bitsplit where S is defined.
2257 if (MaxBitSplit.getNumOccurrences())
2259 MachineInstr *DefS = MRI.getVRegDef(S);
2260 assert(DefS != nullptr);
2261 DebugLoc DL = DefS->getDebugLoc();
2262 MachineBasicBlock &B = *DefS->getParent();
2263 auto At = DefS->isPHI() ? B.getFirstNonPHI()
2264 : MachineBasicBlock::iterator(DefS);
// For a 64-bit source, pick the half containing the fields.
2265 if (MRI.getRegClass(SrcR)->getID() == Hexagon::DoubleRegsRegClassID)
2266 SrcSR = (std::min(Pos, P) == 32) ? Hexagon::isub_hi : Hexagon::isub_lo;
2267 if (!validateReg({SrcR,SrcSR}, Hexagon::A4_bitspliti, 1))
2269 unsigned ImmOp = Pos <= P ? W-Z : Z;
2271 // Find an existing bitsplit instruction if one already exists.
2273 for (MachineInstr *In : NewMIs) {
2274 if (In->getOpcode() != Hexagon::A4_bitspliti)
2276 MachineOperand &Op1 = In->getOperand(1);
2277 if (Op1.getReg() != SrcR || Op1.getSubReg() != SrcSR)
2279 if (In->getOperand(2).getImm() != ImmOp)
2281 // Check if the target register is available here.
2282 MachineOperand &Op0 = In->getOperand(0);
2283 MachineInstr *DefI = MRI.getVRegDef(Op0.getReg());
2284 assert(DefI != nullptr);
// Reuse only if the cached def dominates the insertion point.
2285 if (!MDT.dominates(DefI, &*At))
2288 // Found one that can be reused.
2289 assert(Op0.getSubReg() == 0);
2290 NewR = Op0.getReg();
// No reusable bitsplit found: create one and cache it.
2294 NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
2295 auto NewBS = BuildMI(B, At, DL, HII.get(Hexagon::A4_bitspliti), NewR)
2296 .addReg(SrcR, 0, SrcSR)
2298 NewMIs.push_back(NewBS);
// Route RD and S to the low/high halves of the bitsplit result, depending
// on which field sat lower in SrcR.
2301 HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_lo, MRI);
2302 HBS::replaceRegWithSub(S, NewR, Hexagon::isub_hi, MRI);
2304 HBS::replaceRegWithSub(S, NewR, Hexagon::isub_lo, MRI);
2305 HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_hi, MRI);
2313 // Check for tstbit simplification opportunity, where the bit being checked
2314 // can be tracked back to another register. For example:
2315 // %2 = S2_lsr_i_r %1, 5
2316 // %3 = S2_tstbit_i %2, 0
2318 // %3 = S2_tstbit_i %1, 5
// Simplify an S2_tstbit_i whose tested bit can be traced (via the bit
// tracker) to a bit in another register, or to a compile-time constant.
// In the first case a new tstbit on the origin register/bit is emitted;
// in the second the predicate is materialized as PS_true/PS_false.
// NOTE(review): this excerpt is line-sampled; fall-through returns and
// some braces are not visible here.
2319 bool BitSimplification::simplifyTstbit(MachineInstr *MI,
2320 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2321 unsigned Opc = MI->getOpcode();
// Only S2_tstbit_i is handled.
2322 if (Opc != Hexagon::S2_tstbit_i)
2325 unsigned BN = MI->getOperand(2).getImm();
2326 BitTracker::RegisterRef RS = MI->getOperand(1);
2328 DebugLoc DL = MI->getDebugLoc();
// Bail out if the source register is not tracked or its subreg mask
// (F = first bit, W = width) cannot be established.
2329 if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI))
2331 MachineBasicBlock &B = *MI->getParent();
// New instructions cannot be inserted among PHIs; skip past them.
2332 auto At = MI->isPHI() ? B.getFirstNonPHI()
2333 : MachineBasicBlock::iterator(MI);
2335 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
// V is the tracked value of the single bit being tested.
2336 const BitTracker::BitValue &V = SC[F+BN];
2337 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) {
2338 const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg);
2339 // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is
2340 // a double register, need to use a subregister and adjust bit
2342 unsigned P = std::numeric_limits<unsigned>::max();
2343 BitTracker::RegisterRef RR(V.RefI.Reg, 0);
2344 if (TC == &Hexagon::DoubleRegsRegClass) {
2346 RR.Sub = Hexagon::isub_lo;
2349 RR.Sub = Hexagon::isub_hi;
2351 } else if (TC == &Hexagon::IntRegsRegClass) {
// P remains "max" when no valid 32-bit mapping was found.
2354 if (P != std::numeric_limits<unsigned>::max()) {
2355 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
2356 BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR)
2357 .addReg(RR.Reg, 0, RR.Sub)
2359 HBS::replaceReg(RD.Reg, NewR, MRI);
// Bit is a known constant: the tstbit folds to a constant predicate.
2363 } else if (V.is(0) || V.is(1)) {
2364 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
2365 unsigned NewOpc = V.is(0) ? Hexagon::PS_false : Hexagon::PS_true;
2366 BuildMI(B, At, DL, HII.get(NewOpc), NewR);
2367 HBS::replaceReg(RD.Reg, NewR, MRI);
2374 // Detect whether RD is a bitfield extract (sign- or zero-extended) of
2375 // some register from the AVs set. Create a new corresponding instruction
2376 // at the location of MI. The intent is to recognize situations where
2377 // a sequence of instructions performs an operation that is equivalent to
2378 // an extract operation, such as a shift left followed by a shift right.
// Recognize RD as a sign- or zero-extended bitfield extract of some
// register in AVs, and replace the defining instruction with a single
// extract/extend instruction (sxtb/zxtb/sxth/zxth/andir, or
// S4_extract/S2_extractu and their 64-bit variants).
// Throttled by the hidden -hexbit-max-extract option (see file head).
2379 bool BitSimplification::simplifyExtractLow(MachineInstr *MI,
2380 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC,
2381 const RegisterSet &AVs) {
// Debugging aid: cap the number of extracts generated per run.
2384 if (MaxExtract.getNumOccurrences()) {
2385 if (CountExtract >= MaxExtract)
2390 unsigned W = RC.width();
2395 // The code is mostly class-independent, except for the part that generates
2396 // the extract instruction, and establishes the source register (in case it
2397 // needs to use a subregister).
2398 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
2399 if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass)
2401 assert(RD.Sub == 0);
2404 // If the cell has a form of 00..0xx..x with k zeros and n remaining
2405 // bits, this could be an extractu of the n bits, but it could also be
2406 // an extractu of a longer field which happens to have 0s in the top
2408 // The same logic applies to sign-extended fields.
2410 // Do not check for the extended extracts, since it would expand the
2411 // search space quite a bit. The search may be expensive as it is.
2413 const BitTracker::BitValue &TopV = RC[W-1];
2415 // Eliminate candidates that have self-referential bits, since they
2416 // cannot be extracts from other registers. Also, skip registers that
2417 // have compile-time constant values.
2418 bool IsConst = true;
2419 for (unsigned I = 0; I != W; ++I) {
2420 const BitTracker::BitValue &V = RC[I];
2421 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg == RD.Reg)
2423 IsConst = IsConst && (V.is(0) || V.is(1));
// Top bit constant: strip the run of identical top bits; what remains
// is the candidate field width W (zero- or sign-extended per S).
2428 if (TopV.is(0) || TopV.is(1)) {
2429 bool S = TopV.is(1);
2430 for (--W; W > 0 && RC[W-1].is(S); --W)
2434 // The sign bit must be a part of the field being extended.
2438 // This could still be a sign-extended extract.
2439 assert(TopV.Type == BitTracker::BitValue::Ref);
2440 if (TopV.RefI.Reg == RD.Reg || TopV.RefI.Pos == W-1)
2442 for (--W; W > 0 && RC[W-1] == TopV; --W)
2444 // The top bits of RC are copies of TopV. One occurrence of TopV will
2445 // be a part of the field.
2450 // This would be just a copy. It should be handled elsewhere.
2455 dbgs() << __func__ << " on reg: " << printReg(RD.Reg, &HRI, RD.Sub)
2457 dbgs() << "Cell: " << RC << '\n';
2458 dbgs() << "Expected bitfield size: " << Len << " bits, "
2459 << (Signed ? "sign" : "zero") << "-extended\n";
2462 bool Changed = false;
// Scan every available register R for a matching Len-bit field.
2464 for (unsigned R = AVs.find_first(); R != 0; R = AVs.find_next(R)) {
2467 const BitTracker::RegisterCell &SC = BT.lookup(R);
2468 unsigned SW = SC.width();
2470 // The source can be longer than the destination, as long as its size is
2471 // a multiple of the size of the destination. Also, we would need to be
2472 // able to refer to the subregister in the source that would be of the
2473 // same size as the destination, but only check the sizes here.
2474 if (SW < RW || (SW % RW) != 0)
2477 // The field can start at any offset in SC as long as it contains Len
2478 // bits and does not cross subregister boundary (if the source register
2479 // is longer than the destination).
2481 while (Off <= SW-Len) {
2482 unsigned OE = (Off+Len)/RW;
2484 // The assumption here is that if the source (R) is longer than the
2485 // destination, then the destination is a sequence of words of
2486 // size RW, and each such word in R can be accessed via a subregister.
2488 // If the beginning and the end of the field cross the subregister
2489 // boundary, advance to the next subregister.
2493 if (HBS::isEqual(RC, 0, SC, Off, Len))
// Pick the cheapest opcode for the (Len, Signed) combination; the
// dedicated extend instructions beat a generic extract.
2502 unsigned ExtOpc = 0;
2505 ExtOpc = Signed ? Hexagon::A2_sxtb : Hexagon::A2_zxtb;
2507 ExtOpc = Signed ? Hexagon::A2_sxth : Hexagon::A2_zxth;
2508 else if (Len < 10 && !Signed)
2509 ExtOpc = Hexagon::A2_andir;
2513 Signed ? (RW == 32 ? Hexagon::S4_extract : Hexagon::S4_extractp)
2514 : (RW == 32 ? Hexagon::S2_extractu : Hexagon::S2_extractup);
2517 // This only recognizes isub_lo and isub_hi.
2518 if (RW != SW && RW*2 != SW)
2521 SR = (Off/RW == 0) ? Hexagon::isub_lo : Hexagon::isub_hi;
2524 if (!validateReg({R,SR}, ExtOpc, 1))
2527 // Don't generate the same instruction as the one being optimized.
2528 if (MI->getOpcode() == ExtOpc) {
2529 // All possible ExtOpc's have the source in operand(1).
2530 const MachineOperand &SrcOp = MI->getOperand(1);
2531 if (SrcOp.getReg() == R)
2535 DebugLoc DL = MI->getDebugLoc();
2536 MachineBasicBlock &B = *MI->getParent();
2537 unsigned NewR = MRI.createVirtualRegister(FRC);
2538 auto At = MI->isPHI() ? B.getFirstNonPHI()
2539 : MachineBasicBlock::iterator(MI);
2540 auto MIB = BuildMI(B, At, DL, HII.get(ExtOpc), NewR)
// Append the opcode-specific extra operands.
2543 case Hexagon::A2_sxtb:
2544 case Hexagon::A2_zxtb:
2545 case Hexagon::A2_sxth:
2546 case Hexagon::A2_zxth:
2548 case Hexagon::A2_andir:
// andir masks the low Len bits; Len < 10 guarantees the immediate fits.
2549 MIB.addImm((1u << Len) - 1);
2551 case Hexagon::S4_extract:
2552 case Hexagon::S2_extractu:
2553 case Hexagon::S4_extractp:
2554 case Hexagon::S2_extractup:
2559 llvm_unreachable("Unexpected opcode");
2562 HBS::replaceReg(RD.Reg, NewR, MRI);
// Register the new vreg with the bit tracker under the same cell.
2563 BT.put(BitTracker::RegisterRef(NewR), RC);
// Simplify A4_rcmpeqi/A4_rcmpneqi with an immediate operand of 0:
// if the source register is provably zero or provably non-zero, fold
// the comparison to a constant (A2_tfrsi); if the source comes from a
// C2_muxii with known-zero/non-zero arms, fold to a constant or to a
// new C2_muxii with 0/1 operands.
2571 bool BitSimplification::simplifyRCmp0(MachineInstr *MI,
2572 BitTracker::RegisterRef RD) {
2573 unsigned Opc = MI->getOpcode();
2574 if (Opc != Hexagon::A4_rcmpeqi && Opc != Hexagon::A4_rcmpneqi)
// Only the compare-with-zero form is handled.
2576 MachineOperand &CmpOp = MI->getOperand(2);
2577 if (!CmpOp.isImm() || CmpOp.getImm() != 0)
2580 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
2581 if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass)
2583 assert(RD.Sub == 0);
2585 MachineBasicBlock &B = *MI->getParent();
2586 const DebugLoc &DL = MI->getDebugLoc();
2587 auto At = MI->isPHI() ? B.getFirstNonPHI()
2588 : MachineBasicBlock::iterator(MI);
2590 bool KnownNZ = false;
2592 BitTracker::RegisterRef SR = MI->getOperand(1);
2593 if (!BT.has(SR.Reg))
2595 const BitTracker::RegisterCell &SC = BT.lookup(SR.Reg);
2597 if (!HBS::getSubregMask(SR, F, W, MRI))
// Scan the tracked bits of the source to establish KnownZ/KnownNZ.
2600 for (uint16_t I = F; I != F+W; ++I) {
2601 const BitTracker::BitValue &V = SC[I];
// Helper: replace RD with a fresh vreg loaded with constant C, and
// record the constant's bit pattern with the bit tracker.
2608 auto ReplaceWithConst = [&] (int C) {
2609 unsigned NewR = MRI.createVirtualRegister(FRC);
2610 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), NewR)
2612 HBS::replaceReg(RD.Reg, NewR, MRI);
2613 BitTracker::RegisterCell NewRC(W);
2614 for (uint16_t I = 0; I != W; ++I) {
2615 NewRC[I] = BitTracker::BitValue(C & 1);
// Logical shift: avoid sign-extension of negative C.
2616 C = unsigned(C) >> 1;
2618 BT.put(BitTracker::RegisterRef(NewR), NewRC);
// Operand classifiers for the C2_muxii arms; globals/block addresses
// are treated as non-zero.
2622 auto IsNonZero = [] (const MachineOperand &Op) {
2623 if (Op.isGlobal() || Op.isBlockAddress())
2626 return Op.getImm() != 0;
2628 return !Op.getCImm()->isZero();
2630 return !Op.getFPImm()->isZero();
2634 auto IsZero = [] (const MachineOperand &Op) {
2635 if (Op.isGlobal() || Op.isBlockAddress())
2638 return Op.getImm() == 0;
2640 return Op.getCImm()->isZero();
2642 return Op.getFPImm()->isZero();
2646 // If the source register is known to be 0 or non-0, the comparison can
2647 // be folded to a load of a constant.
2648 if (KnownZ || KnownNZ) {
2649 assert(KnownZ != KnownNZ && "Register cannot be both 0 and non-0");
2650 return ReplaceWithConst(KnownZ == (Opc == Hexagon::A4_rcmpeqi));
2653 // Special case: if the compare comes from a C2_muxii, then we know the
2654 // two possible constants that can be the source value.
2655 MachineInstr *InpDef = MRI.getVRegDef(SR.Reg);
2658 if (SR.Sub == 0 && InpDef->getOpcode() == Hexagon::C2_muxii) {
2659 MachineOperand &Src1 = InpDef->getOperand(2);
2660 MachineOperand &Src2 = InpDef->getOperand(3);
2661 // Check if both are non-zero.
2662 bool KnownNZ1 = IsNonZero(Src1), KnownNZ2 = IsNonZero(Src2);
2663 if (KnownNZ1 && KnownNZ2)
2664 return ReplaceWithConst(Opc == Hexagon::A4_rcmpneqi);
2665 // Check if both are zero.
2666 bool KnownZ1 = IsZero(Src1), KnownZ2 = IsZero(Src2);
2667 if (KnownZ1 && KnownZ2)
2668 return ReplaceWithConst(Opc == Hexagon::A4_rcmpeqi);
2670 // If for both operands we know that they are either 0 or non-0,
2671 // replace the comparison with a C2_muxii, using the same predicate
2672 // register, but with operands substituted with 0/1 accordingly.
2673 if ((KnownZ1 || KnownNZ1) && (KnownZ2 || KnownNZ2)) {
2674 unsigned NewR = MRI.createVirtualRegister(FRC);
2675 BuildMI(B, At, DL, HII.get(Hexagon::C2_muxii), NewR)
2676 .addReg(InpDef->getOperand(1).getReg())
2677 .addImm(KnownZ1 == (Opc == Hexagon::A4_rcmpeqi))
2678 .addImm(KnownZ2 == (Opc == Hexagon::A4_rcmpeqi))
2679 HBS::replaceReg(RD.Reg, NewR, MRI);
2680 // Create a new cell with only the least significant bit unknown.
2681 BitTracker::RegisterCell NewRC(W);
2682 NewRC[0] = BitTracker::BitValue::self();
2683 NewRC.fill(1, W, BitTracker::BitValue::Zero);
2684 BT.put(BitTracker::RegisterRef(NewR), NewRC);
// Driver for all BitSimplification transformations in one basic block.
// AVs is the set of registers available (defined) on entry; AVB grows
// with each instruction's defs as the block is scanned top-down.
2692 bool BitSimplification::processBlock(MachineBasicBlock &B,
2693 const RegisterSet &AVs) {
// Skip blocks the bit tracker never reached (dead/unanalyzed code).
2694 if (!BT.reached(&B))
2696 bool Changed = false;
2697 RegisterSet AVB = AVs;
// Defs of the current instruction are merged into AVB at loop step.
2700 for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) {
2701 MachineInstr *MI = &*I;
2703 HBS::getInstrDefs(*MI, Defs);
2705 unsigned Opc = MI->getOpcode();
// Plain copies/reg-sequences are left to the copy passes.
2706 if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE)
2709 if (MI->mayStore()) {
2710 bool T = genStoreUpperHalf(MI);
2711 T = T || genStoreImmediate(MI);
// Remaining transformations require a single register def.
2716 if (Defs.count() != 1)
2718 const MachineOperand &Op0 = MI->getOperand(0);
2719 if (!Op0.isReg() || !Op0.isDef())
2721 BitTracker::RegisterRef RD = Op0;
2722 if (!BT.has(RD.Reg))
2724 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
2725 const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg);
// Dispatch on the register class of the defined value.
2727 if (FRC->getID() == Hexagon::DoubleRegsRegClassID) {
2728 bool T = genPackhl(MI, RD, RC);
2729 T = T || simplifyExtractLow(MI, RD, RC, AVB);
2734 if (FRC->getID() == Hexagon::IntRegsRegClassID) {
2735 bool T = genBitSplit(MI, RD, RC, AVB);
2736 T = T || simplifyExtractLow(MI, RD, RC, AVB);
2737 T = T || genExtractHalf(MI, RD, RC);
2738 T = T || genCombineHalf(MI, RD, RC);
2739 T = T || genExtractLow(MI, RD, RC);
2740 T = T || simplifyRCmp0(MI, RD);
2745 if (FRC->getID() == Hexagon::PredRegsRegClassID) {
2746 bool T = simplifyTstbit(MI, RD, RC);
// Pass entry point: runs dead-code elimination, then the sub-phases
// (constant generation, redundant-instruction elimination, copy
// generation/propagation, bit simplification) over the dominator tree,
// interleaved with further DCE cleanups.
2754 bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
2755 if (skipFunction(MF.getFunction()))
2758 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2759 auto &HRI = *HST.getRegisterInfo();
2760 auto &HII = *HST.getInstrInfo();
2762 MDT = &getAnalysis<MachineDominatorTree>();
2763 MachineRegisterInfo &MRI = MF.getRegInfo();
// Initial cleanup before running the bit tracker.
2766 Changed = DeadCodeElimination(MF, *MDT).run();
2768 const HexagonEvaluator HE(HRI, MRI, HII, MF);
2769 BitTracker BT(HE, MF);
2770 LLVM_DEBUG(BT.trace(true));
2773 MachineBasicBlock &Entry = MF.front();
// Each phase visits blocks from Entry with its own available-register set.
2775 RegisterSet AIG; // Available registers for IG.
2776 ConstGeneration ImmG(BT, HII, MRI);
2777 Changed |= visitBlock(Entry, ImmG, AIG);
2779 RegisterSet ARE; // Available registers for RIE.
2780 RedundantInstrElimination RIE(BT, HII, HRI, MRI);
2781 bool Ried = visitBlock(Entry, RIE, ARE);
2787 RegisterSet ACG; // Available registers for CG.
2788 CopyGeneration CopyG(BT, HII, HRI, MRI);
2789 Changed |= visitBlock(Entry, CopyG, ACG);
2791 RegisterSet ACP; // Available registers for CP.
2792 CopyPropagation CopyP(HRI, MRI);
2793 Changed |= visitBlock(Entry, CopyP, ACP);
// DCE after copy propagation; ordering keeps Changed sticky.
2795 Changed = DeadCodeElimination(MF, *MDT).run() || Changed;
2798 RegisterSet ABS; // Available registers for BS.
2799 BitSimplification BitS(BT, *MDT, HII, HRI, MRI, MF);
2800 Changed |= visitBlock(Entry, BitS, ABS);
2802 Changed = DeadCodeElimination(MF, *MDT).run() || Changed;
// Final cleanup; result intentionally not folded into Changed here.
2808 DeadCodeElimination(MF, *MDT).run();
2813 // Recognize loops where the code at the end of the loop matches the code
2814 // before the entry of the loop, and the matching code is such that it can
2815 // be simplified. This pass relies on the bit simplification above and only
2816 // prepares code in a way that can be handled by the bit simplification.
2818 // This is the motivating testcase (and explanation):
2821 // loop0(.LBB0_2, r1) // %for.body.preheader
2822 // r5:4 = memd(r0++#8)
2825 // r3 = lsr(r4, #16)
2826 // r7:6 = combine(r5, r5)
2829 // r3 = insert(r5, #16, #16)
2830 // r7:6 = vlsrw(r7:6, #16)
2835 // memh(r2+#6) = r6 # R6 is really R5.H
2840 // memh(r2+#2) = r3 # R3 is really R4.H
2843 // r5:4 = memd(r0++#8)
2845 // { # "Shuffling" code that sets up R3 and R6
2846 // r3 = lsr(r4, #16) # so that their halves can be stored in the
2847 // r7:6 = combine(r5, r5) # next iteration. This could be folded into
2848 // } # the stores if the code was at the beginning
2849 // { # of the loop iteration. Since the same code
2850 // r3 = insert(r5, #16, #16) # precedes the loop, it can actually be moved
2851 // r7:6 = vlsrw(r7:6, #16) # there.
2858 // loop0(.LBB0_2, r1)
2859 // r5:4 = memd(r0++#8)
2864 // memh(r2+#6) = r5.h
2869 // memh(r2+#2) = r4.h
2872 // r5:4 = memd(r0++#8)
// Forward declarations (inside namespace llvm) for the loop-rescheduling
// pass factory and its pass-registry initializer.
2877 FunctionPass *createHexagonLoopRescheduling();
2878 void initializeHexagonLoopReschedulingPass(PassRegistry&);
2880 } // end namespace llvm
// Machine pass that hoists "bit shuffling" code from the end of a loop
// body to before the loop (see the explanatory comment above), so the
// bit-simplification pass can fold it into its users.
2884 class HexagonLoopRescheduling : public MachineFunctionPass {
2888 HexagonLoopRescheduling() : MachineFunctionPass(ID) {
2889 initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry());
2892 bool runOnMachineFunction(MachineFunction &MF) override;
// Subtarget/register state cached per function; BTP owns nothing here.
2895 const HexagonInstrInfo *HII = nullptr;
2896 const HexagonRegisterInfo *HRI = nullptr;
2897 MachineRegisterInfo *MRI = nullptr;
2898 BitTracker *BTP = nullptr;
// A candidate loop: loop block LB, preheader PB, epilog EB.
2901 LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb,
2902 MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {}
2904 MachineBasicBlock *LB, *PB, *EB;
2906 using InstrList = std::vector<MachineInstr *>;
// InstrGroup: a chain of dependent shuffling instructions with a single
// input (Inp) and a single live output (Out).
2908 BitTracker::RegisterRef Inp, Out;
// PhiInfo: decomposes a loop-header PHI into its loop-carried (LR) and
// preheader (PR) incoming values.
2912 PhiInfo(MachineInstr &P, MachineBasicBlock &B);
2915 BitTracker::RegisterRef LR, PR; // Loop Register, Preheader Register
2916 MachineBasicBlock *LB, *PB; // Loop Block, Preheader Block
2919 static unsigned getDefReg(const MachineInstr *MI);
2920 bool isConst(unsigned Reg) const;
2921 bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const;
// NOTE(review): the definition below names this parameter InpR, not DefR.
2922 bool isStoreInput(const MachineInstr *MI, unsigned DefR) const;
2923 bool isShuffleOf(unsigned OutR, unsigned InpR) const;
2924 bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2,
2925 unsigned &InpR2) const;
2926 void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB,
2927 MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR);
2928 bool processLoop(LoopCand &C);
2931 } // end anonymous namespace
// Pass identity and registration with the LLVM pass registry.
2933 char HexagonLoopRescheduling::ID = 0;
2935 INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched",
2936 "Hexagon Loop Rescheduling", false, false)
// Build PhiInfo from PHI instruction P in loop block B: DefR is the PHI
// result; the incoming value whose predecessor is B itself becomes the
// loop-carried register LR, the other one the preheader register PR.
2938 HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P,
2939 MachineBasicBlock &B) {
2940 DefR = HexagonLoopRescheduling::getDefReg(&P);
// PHI operands come in (value, block) pairs starting at index 1.
2943 for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) {
2944 const MachineOperand &OpB = P.getOperand(i+1);
2945 if (OpB.getMBB() == &B) {
2946 LR = P.getOperand(i);
2950 PR = P.getOperand(i);
// Return the single register defined by MI (its only def); behavior for
// multi-def instructions is guarded by the count check.
2954 unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) {
2956 HBS::getInstrDefs(*MI, Defs);
2957 if (Defs.count() != 1)
2959 return Defs.find_first();
// True if every tracked bit of Reg is a compile-time constant (0 or 1).
2962 bool HexagonLoopRescheduling::isConst(unsigned Reg) const {
2965 const BitTracker::RegisterCell &RC = BTP->lookup(Reg);
2966 for (unsigned i = 0, w = RC.width(); i < w; ++i) {
2967 const BitTracker::BitValue &V = RC[i];
2968 if (!V.is(0) && !V.is(1))
// True if MI is a "bit shuffling" instruction — one that only rearranges
// or masks bits (copies, immediate shifts, insert, and/or, combines) —
// as opposed to producing genuinely new values.
2974 bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI,
2975 unsigned DefR) const {
2976 unsigned Opc = MI->getOpcode();
2978 case TargetOpcode::COPY:
2979 case Hexagon::S2_lsr_i_r:
2980 case Hexagon::S2_asr_i_r:
2981 case Hexagon::S2_asl_i_r:
2982 case Hexagon::S2_lsr_i_p:
2983 case Hexagon::S2_asr_i_p:
2984 case Hexagon::S2_asl_i_p:
2985 case Hexagon::S2_insert:
2986 case Hexagon::A2_or:
2987 case Hexagon::A2_orp:
2988 case Hexagon::A2_and:
2989 case Hexagon::A2_andp:
2990 case Hexagon::A2_combinew:
2991 case Hexagon::A4_combineri:
2992 case Hexagon::A4_combineir:
2993 case Hexagon::A2_combineii:
2994 case Hexagon::A4_combineii:
2995 case Hexagon::A2_combine_ll:
2996 case Hexagon::A2_combine_lh:
2997 case Hexagon::A2_combine_hl:
2998 case Hexagon::A2_combine_hh:
// True if register InpR appears among MI's operands, i.e. MI consumes it
// (used to accept stores of a shuffled value as a "good" use).
3004 bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI,
3005 unsigned InpR) const {
3006 for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
3007 const MachineOperand &Op = MI->getOperand(i);
3010 if (Op.getReg() == InpR)
// True if every referenced bit of OutR traces back to InpR, i.e. OutR is
// purely a rearrangement of InpR's bits (per the bit tracker).
3016 bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const {
3017 if (!BTP->has(OutR) || !BTP->has(InpR))
3019 const BitTracker::RegisterCell &OutC = BTP->lookup(OutR);
3020 for (unsigned i = 0, w = OutC.width(); i < w; ++i) {
3021 const BitTracker::BitValue &V = OutC[i];
3022 if (V.Type != BitTracker::BitValue::Ref)
3024 if (V.RefI.Reg != InpR)
// Check whether OutR2 is the same bit shuffle of some register as OutR1
// is of InpR1; on success the discovered input register is returned via
// InpR2 (all referenced bits of OutR2 must come from one register).
3030 bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1,
3031 unsigned OutR2, unsigned &InpR2) const {
3032 if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2))
3034 const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1);
3035 const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2);
3036 unsigned W = OutC1.width();
3037 unsigned MatchR = 0;
3038 if (W != OutC2.width())
// Compare bit-by-bit: types must agree, and referenced bits must come
// from the same position in InpR1 / a single common register MatchR.
3040 for (unsigned i = 0; i < W; ++i) {
3041 const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i];
3042 if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One)
3044 if (V1.Type != BitTracker::BitValue::Ref)
3046 if (V1.RefI.Pos != V2.RefI.Pos)
3048 if (V1.RefI.Reg != InpR1)
// Reject self-references and untracked inputs on the second shuffle.
3050 if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2)
3053 MatchR = V2.RefI.Reg;
3054 else if (V2.RefI.Reg != MatchR)
// Clone instruction group G at the top of loop block LB: create a new
// PHI feeding the group's input (NewPredR from the preheader), rebuild
// each instruction with remapped registers, then redirect users of the
// old PHI result OldPhiR to the clone's output.
3061 void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
3062 MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR,
3063 unsigned NewPredR) {
// RegMap: original vreg -> cloned vreg within the moved group.
3064 DenseMap<unsigned,unsigned> RegMap;
3066 const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR);
3067 unsigned PhiR = MRI->createVirtualRegister(PhiRC);
3068 BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR)
3073 RegMap.insert(std::make_pair(G.Inp.Reg, PhiR));
// G.Ins is in reverse order; iterate backwards to emit defs before uses.
3075 for (unsigned i = G.Ins.size(); i > 0; --i) {
3076 const MachineInstr *SI = G.Ins[i-1];
3077 unsigned DR = getDefReg(SI);
3078 const TargetRegisterClass *RC = MRI->getRegClass(DR);
3079 unsigned NewDR = MRI->createVirtualRegister(RC);
3080 DebugLoc DL = SI->getDebugLoc();
3082 auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR);
3083 for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) {
3084 const MachineOperand &Op = SI->getOperand(j);
// Remap register uses through RegMap; subregisters are preserved.
3091 unsigned UseR = RegMap[Op.getReg()];
3092 MIB.addReg(UseR, 0, Op.getSubReg());
3094 RegMap.insert(std::make_pair(DR, NewDR));
// Redirect all users of the old PHI result to the cloned group output.
3097 HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI);
// Process one candidate loop C: collect PHIs whose uses are all bit
// shuffles or stores, gather the shuffling instructions at the bottom of
// the loop into dependency groups, and move each group that reproduces
// the loop-carried shuffle to the top of the loop (via moveGroup).
3100 bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
3101 LLVM_DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB)
3103 std::vector<PhiInfo> Phis;
// Pass 1: find PHIs in the loop block with only "good" in-loop uses.
3104 for (auto &I : *C.LB) {
3107 unsigned PR = getDefReg(&I);
3110 bool BadUse = false, GoodUse = false;
3111 for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) {
3112 MachineInstr *UseI = UI->getParent();
3113 if (UseI->getParent() != C.LB) {
3117 if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR))
// Require at least one good use and no bad ones.
3120 if (BadUse || !GoodUse)
3123 Phis.push_back(PhiInfo(I, *C.LB));
3127 dbgs() << "Phis: {";
3128 for (auto &I : Phis) {
3129 dbgs() << ' ' << printReg(I.DefR, HRI) << "=phi("
3130 << printReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber()
3131 << ',' << printReg(I.LR.Reg, HRI, I.LR.Sub) << ":b"
3132 << I.LB->getNumber() << ')';
3140 bool Changed = false;
3143 // Go backwards in the block: for each bit shuffling instruction, check
3144 // if that instruction could potentially be moved to the front of the loop:
3145 // the output of the loop cannot be used in a non-shuffling instruction
3147 for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) {
3148 if (I->isTerminator())
3154 HBS::getInstrDefs(*I, Defs);
3155 if (Defs.count() != 1)
3157 unsigned DefR = Defs.find_first();
3158 if (!TargetRegisterInfo::isVirtualRegister(DefR))
3160 if (!isBitShuffle(&*I, DefR))
3163 bool BadUse = false;
3164 for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) {
3165 MachineInstr *UseI = UI->getParent();
3166 if (UseI->getParent() == C.LB) {
3167 if (UseI->isPHI()) {
3168 // If the use is in a phi node in this loop, then it should be
3169 // the value corresponding to the back edge.
3170 unsigned Idx = UI.getOperandNo();
3171 if (UseI->getOperand(Idx+1).getMBB() != C.LB)
// In-loop non-PHI users must themselves be collected shuffles.
3174 auto F = find(ShufIns, UseI);
3175 if (F == ShufIns.end())
3179 // There is a use outside of the loop, but there is no epilog block
3180 // suitable for a copy-out.
3181 if (C.EB == nullptr)
3190 ShufIns.push_back(&*I);
3193 // Partition the list of shuffling instructions into instruction groups,
3194 // where each group has to be moved as a whole (i.e. a group is a chain of
3195 // dependent instructions). A group produces a single live output register,
3196 // which is meant to be the input of the loop phi node (although this is
3197 // not checked here yet). It also uses a single register as its input,
3198 // which is some value produced in the loop body. After moving the group
3199 // to the beginning of the loop, that input register would need to be
3200 // the loop-carried register (through a phi node) instead of the (currently
3201 // loop-carried) output register.
3202 using InstrGroupList = std::vector<InstrGroup>;
3203 InstrGroupList Groups;
3205 for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) {
3206 MachineInstr *SI = ShufIns[i];
// Seed a new group with SI; Inputs tracks yet-unresolved group inputs.
3211 G.Ins.push_back(SI);
3212 G.Out.Reg = getDefReg(SI);
3214 HBS::getInstrUses(*SI, Inputs);
3216 for (unsigned j = i+1; j < n; ++j) {
3217 MachineInstr *MI = ShufIns[j];
3221 HBS::getInstrDefs(*MI, Defs);
3222 // If this instruction does not define any pending inputs, skip it.
3223 if (!Defs.intersects(Inputs))
3225 // Otherwise, add it to the current group and remove the inputs that
3226 // are defined by MI.
3227 G.Ins.push_back(MI);
3228 Inputs.remove(Defs);
3229 // Then add all registers used by MI.
3230 HBS::getInstrUses(*MI, Inputs);
// Mark MI as consumed so it does not seed another group.
3231 ShufIns[j] = nullptr;
3234 // Only add a group if it requires at most one register.
3235 if (Inputs.count() > 1)
3237 auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
3238 return G.Out.Reg == P.LR.Reg;
3240 if (llvm::find_if(Phis, LoopInpEq) == Phis.end())
3243 G.Inp.Reg = Inputs.find_first();
3244 Groups.push_back(G);
3248 for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
3249 InstrGroup &G = Groups[i];
3250 dbgs() << "Group[" << i << "] inp: "
3251 << printReg(G.Inp.Reg, HRI, G.Inp.Sub)
3252 << " out: " << printReg(G.Out.Reg, HRI, G.Out.Sub) << "\n";
3253 for (unsigned j = 0, m = G.Ins.size(); j < m; ++j)
3254 dbgs() << " " << *G.Ins[j];
// Pass 3: for each group that reproduces the loop-carried shuffle,
// establish a preheader input PrehR and move the group.
3258 for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
3259 InstrGroup &G = Groups[i];
3260 if (!isShuffleOf(G.Out.Reg, G.Inp.Reg))
3262 auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
3263 return G.Out.Reg == P.LR.Reg;
3265 auto F = llvm::find_if(Phis, LoopInpEq);
3266 if (F == Phis.end())
3269 if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PrehR)) {
// No matching shuffle in the preheader: only the all-zeros constant
// case (tfrsi/tfrpi #0) can still be handled, since shuffling zeros
// yields zeros.
3270 const MachineInstr *DefPrehR = MRI->getVRegDef(F->PR.Reg);
3271 unsigned Opc = DefPrehR->getOpcode();
3272 if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi)
3274 if (!DefPrehR->getOperand(1).isImm())
3276 if (DefPrehR->getOperand(1).getImm() != 0)
3278 const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg);
3279 if (RC != MRI->getRegClass(F->PR.Reg)) {
// Rematerialize the zero in the class of the group input.
3280 PrehR = MRI->createVirtualRegister(RC);
3281 unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi
3282 : Hexagon::A2_tfrpi;
3283 auto T = C.PB->getFirstTerminator();
3284 DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc();
3285 BuildMI(*C.PB, T, DL, HII->get(TfrI), PrehR)
3291 // isSameShuffle could match with PrehR being of a wider class than
3292 // G.Inp.Reg, for example if G shuffles the low 32 bits of its input,
3293 // it would match for the input being a 32-bit register, and PrehR
3294 // being a 64-bit register (where the low 32 bits match). This could
3295 // be handled, but for now skip these cases.
3296 if (MRI->getRegClass(PrehR) != MRI->getRegClass(G.Inp.Reg))
3298 moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PrehR);
// Pass entry point: run the bit tracker, then scan for single-block
// loops (a block that is both its own predecessor and successor, with a
// preheader and optionally a non-critical epilog) and process each.
3305 bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
3306 if (skipFunction(MF.getFunction()))
3309 auto &HST = MF.getSubtarget<HexagonSubtarget>();
3310 HII = HST.getInstrInfo();
3311 HRI = HST.getRegisterInfo();
3312 MRI = &MF.getRegInfo();
3313 const HexagonEvaluator HE(*HRI, *MRI, *HII, MF);
3314 BitTracker BT(HE, MF);
3315 LLVM_DEBUG(BT.trace(true));
3319 std::vector<LoopCand> Cand;
3321 for (auto &B : MF) {
// Candidate loop shape: exactly two preds (preheader + back edge) and
// two succs (itself + exit).
3322 if (B.pred_size() != 2 || B.succ_size() != 2)
3324 MachineBasicBlock *PB = nullptr;
3325 bool IsLoop = false;
3326 for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) {
3335 MachineBasicBlock *EB = nullptr;
3336 for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) {
3339 // Set EB to the epilog block, if it has only 1 predecessor (i.e. the
3340 // edge from B to EB is non-critical).
3341 if ((*SI)->pred_size() == 1)
3346 Cand.push_back(LoopCand(&B, PB, EB));
3349 bool Changed = false;
3350 for (auto &C : Cand)
3351 Changed |= processLoop(C);
3356 //===----------------------------------------------------------------------===//
3357 // Public Constructor Functions
3358 //===----------------------------------------------------------------------===//
// Factory for the loop-rescheduling pass (declared in namespace llvm).
3360 FunctionPass *llvm::createHexagonLoopRescheduling() {
3361 return new HexagonLoopRescheduling();
3364 FunctionPass *llvm::createHexagonBitSimplify() {
3365 return new HexagonBitSimplify();