1 //===--- HexagonLoopIdiomRecognition.cpp ----------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "hexagon-lir"
12 #include "llvm/ADT/SetVector.h"
13 #include "llvm/ADT/SmallSet.h"
14 #include "llvm/Analysis/AliasAnalysis.h"
15 #include "llvm/Analysis/InstructionSimplify.h"
16 #include "llvm/Analysis/LoopPass.h"
17 #include "llvm/Analysis/ScalarEvolution.h"
18 #include "llvm/Analysis/ScalarEvolutionExpander.h"
19 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
20 #include "llvm/Analysis/TargetLibraryInfo.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Dominators.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/Transforms/Scalar.h"
27 #include "llvm/Transforms/Utils/Local.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/KnownBits.h"
30 #include "llvm/Support/raw_ostream.h"
// Command-line knobs that gate the memcpy/memmove loop-idiom transformations.
// NOTE(review): this listing elides some original lines (the embedded
// line numbers have gaps); verify edits against the upstream source.
37 static cl::opt<bool> DisableMemcpyIdiom("disable-memcpy-idiom",
38 cl::Hidden, cl::init(false),
39 cl::desc("Disable generation of memcpy in loop idiom recognition"));
41 static cl::opt<bool> DisableMemmoveIdiom("disable-memmove-idiom",
42 cl::Hidden, cl::init(false),
43 cl::desc("Disable generation of memmove in loop idiom recognition"));
// Default of 0 bytes: no minimum size demanded for the runtime guard.
45 static cl::opt<unsigned> RuntimeMemSizeThreshold("runtime-mem-idiom-threshold",
46 cl::Hidden, cl::init(0), cl::desc("Threshold (in bytes) for the runtime "
47 "check guarding the memmove."));
49 static cl::opt<unsigned> CompileTimeMemSizeThreshold(
50 "compile-time-mem-idiom-threshold", cl::Hidden, cl::init(64),
51 cl::desc("Threshold (in bytes) to perform the transformation, if the "
52 "runtime loop count (mem transfer size) is known at compile-time."))
53 ;
54 static cl::opt<bool> OnlyNonNestedMemmove("only-nonnested-memmove-idiom",
55 cl::Hidden, cl::init(true),
56 cl::desc("Only enable generating memmove in non-nested loops"));
// NOTE(review): option name says "disable-...-volatile-memcpy" but the
// description says "Enable ..." — the flag/description polarity looks
// inconsistent; confirm intended sense against upstream before relying on it.
58 cl::opt<bool> HexagonVolatileMemcpy("disable-hexagon-volatile-memcpy",
59 cl::Hidden, cl::init(false),
60 cl::desc("Enable Hexagon-specific memcpy for volatile destination."));
// Name of the target-specific runtime routine emitted for the volatile-memcpy
// idiom.
62 static const char *HexagonVolatileMemcpyName
63 = "hexagon_memcpy_forward_vp4cp4n2";
// Forward declarations of the pass-registry hooks (presumably inside
// "namespace llvm" — the enclosing namespace braces are elided in this
// listing; confirm against upstream).
67 void initializeHexagonLoopIdiomRecognizePass(PassRegistry&);
68 Pass *createHexagonLoopIdiomPass();
// Legacy-PM loop pass that recognizes Hexagon-specific loop idioms
// (e.g. memory-transfer loops) and rewrites them.
// NOTE(review): interior lines (access specifiers, closing braces) are
// elided in this listing.
72 class HexagonLoopIdiomRecognize : public LoopPass {
75 explicit HexagonLoopIdiomRecognize() : LoopPass(ID) {
76 initializeHexagonLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
78 StringRef getPassName() const override {
79 return "Recognize Hexagon-specific loop idioms";
// Declare the analyses this pass requires/preserves. Loops must be in
// simplified and LCSSA form before the pass runs.
82 void getAnalysisUsage(AnalysisUsage &AU) const override {
83 AU.addRequired<LoopInfoWrapperPass>();
84 AU.addRequiredID(LoopSimplifyID);
85 AU.addRequiredID(LCSSAID);
86 AU.addRequired<AAResultsWrapperPass>();
87 AU.addPreserved<AAResultsWrapperPass>();
88 AU.addRequired<ScalarEvolutionWrapperPass>();
89 AU.addRequired<DominatorTreeWrapperPass>();
90 AU.addRequired<TargetLibraryInfoWrapperPass>();
91 AU.addPreserved<TargetLibraryInfoWrapperPass>();
// Main entry point for the legacy pass manager.
94 bool runOnLoop(Loop *L, LPPassManager &LPM) override;
// Helpers for recognizing/legalizing copying stores (bodies not visible here).
97 unsigned getStoreSizeInBytes(StoreInst *SI);
98 int getSCEVStride(const SCEVAddRecExpr *StoreEv);
99 bool isLegalStore(Loop *CurLoop, StoreInst *SI);
100 void collectStores(Loop *CurLoop, BasicBlock *BB,
101 SmallVectorImpl<StoreInst*> &Stores);
102 bool processCopyingStore(Loop *CurLoop, StoreInst *SI, const SCEV *BECount);
103 bool coverLoop(Loop *L, SmallVectorImpl<Instruction*> &Insts) const;
104 bool runOnLoopBlock(Loop *CurLoop, BasicBlock *BB, const SCEV *BECount,
105 SmallVectorImpl<BasicBlock*> &ExitBlocks);
106 bool runOnCountableLoop(Loop *L);
// Cached analysis/module state, set up per run (initialization not visible).
109 const DataLayout *DL;
112 const TargetLibraryInfo *TLI;
// Whether the target library provides memcpy/memmove.
114 bool HasMemcpy, HasMemmove;
// Pass identification and registration with the legacy pass manager,
// declaring the analysis dependencies required above.
118 char HexagonLoopIdiomRecognize::ID = 0;
120 INITIALIZE_PASS_BEGIN(HexagonLoopIdiomRecognize, "hexagon-loop-idiom",
121 "Recognize Hexagon-specific loop idioms", false, false)
122 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
123 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
124 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
125 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
126 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
127 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
128 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
129 INITIALIZE_PASS_END(HexagonLoopIdiomRecognize, "hexagon-loop-idiom",
130 "Recognize Hexagon-specific loop idioms", false, false)
// Interior of the expression-simplifier machinery. The enclosing
// "struct Simplifier { ... struct Context { ..." headers and closing
// braces are elided in this listing — member grouping below is inferred
// from the out-of-line definitions that follow; verify against upstream.
//
// A Rule rewrites a (cloned) instruction into a replacement value, or
// returns null if it does not apply.
135 typedef std::function<Value* (Instruction*, LLVMContext&)> Rule;
137 void addRule(const Rule &R) { Rules.push_back(R); }
// FIFO worklist with set-based deduplication of queued values.
140 struct WorkListType {
141 WorkListType() = default;
143 void push_back(Value* V) {
144 // Do not push back duplicates.
145 if (!S.count(V)) { Q.push_back(V); S.insert(V); }
147 Value *pop_front_val() {
148 Value *V = Q.front(); Q.pop_front(); S.erase(V);
151 bool empty() const { return Q.empty(); }
// Q preserves FIFO order; S (declaration elided here) tracks membership.
154 std::deque<Value*> Q;
158 typedef std::set<Value*> ValueSetType;
159 std::vector<Rule> Rules;
// Context: owns a deep clone of the expression being simplified, so rules
// can mutate it freely without touching the original IR.
163 typedef DenseMap<Value*,Value*> ValueMapType;
166 ValueSetType Used; // The set of all cloned values used by Root.
167 ValueSetType Clones; // The set of all cloned values.
170 Context(Instruction *Exp)
171 : Ctx(Exp->getParent()->getParent()->getContext()) {
// Destructor deletes all clones that were never linked into a block.
174 ~Context() { cleanup(); }
175 void print(raw_ostream &OS, const Value *V) const;
177 Value *materialize(BasicBlock *B, BasicBlock::iterator At);
180 void initialize(Instruction *Exp);
183 template <typename FuncT> void traverse(Value *V, FuncT F);
184 void record(Value *V);
186 void unuse(Value *V);
188 bool equal(const Instruction *I, const Instruction *J) const;
189 Value *find(Value *Tree, Value *Sub) const;
190 Value *subst(Value *Tree, Value *OldV, Value *NewV);
191 void replace(Value *OldV, Value *NewV);
192 void link(Instruction *I, BasicBlock *B, BasicBlock::iterator At);
194 friend struct Simplifier;
197 Value *simplify(Context &C);
// PE: small helper to stream-print an expression tree via Context::print.
201 PE(const Simplifier::Context &c, Value *v = nullptr) : C(c), V(v) {}
202 const Simplifier::Context &C;
206 raw_ostream &operator<< (raw_ostream &OS, const PE &P) LLVM_ATTRIBUTE_USED;
207 raw_ostream &operator<< (raw_ostream &OS, const PE &P) {
// Print V if given, otherwise the whole tree rooted at Root.
208 P.C.print(OS, P.V ? P.V : P.C.Root);
// Breadth-first traversal over the clone tree starting at V, applying F to
// each visited instruction. Only unlinked (clone) instructions are visited:
// anything with a parent block is part of the original IR and is skipped.
// NOTE(review): the worklist setup and the use of F's return value are on
// elided lines; presumably F returning false stops descent — confirm.
214 template <typename FuncT>
215 void Simplifier::Context::traverse(Value *V, FuncT F) {
220 Instruction *U = dyn_cast<Instruction>(Q.pop_front_val());
221 if (!U || U->getParent())
225 for (Value *Op : U->operands())
231 void Simplifier::Context::print(raw_ostream &OS, const Value *V) const {
232 const auto *U = dyn_cast<const Instruction>(V);
234 OS << V << '(' << *V << ')';
238 if (U->getParent()) {
240 U->printAsOperand(OS, true);
245 unsigned N = U->getNumOperands();
248 OS << U->getOpcodeName();
249 for (const Value *Op : U->operands()) {
258 void Simplifier::Context::initialize(Instruction *Exp) {
259 // Perform a deep clone of the expression, set Root to the root
260 // of the clone, and build a map from the cloned values to the
// (comment continues on an elided line — presumably "...original ones").
263 BasicBlock *Block = Exp->getParent();
// Pass 1: clone every instruction of the expression that lives in Exp's
// block. PHIs and out-of-block values are treated as leaves (not cloned).
268 Value *V = Q.pop_front_val();
269 if (M.find(V) != M.end())
271 if (Instruction *U = dyn_cast<Instruction>(V)) {
272 if (isa<PHINode>(U) || U->getParent() != Block)
274 for (Value *Op : U->operands())
276 M.insert({U, U->clone()});
// Pass 2: rewire each clone's operands to point at the corresponding
// clones (leaves keep their original operands).
280 for (std::pair<Value*,Value*> P : M) {
281 Instruction *U = cast<Instruction>(P.second);
282 for (unsigned i = 0, n = U->getNumOperands(); i != n; ++i) {
283 auto F = M.find(U->getOperand(i));
285 U->setOperand(i, F->second);
// Root becomes the clone of Exp (assignment is on an elided line).
289 auto R = M.find(Exp);
290 assert(R != M.end());
// record/use/unuse maintain the Clones and Used bookkeeping sets via
// traverse(). The lambda bodies and traverse calls are on elided lines;
// from the visible code: record() registers clones, use() marks values as
// used by Root, unuse() removes use-marks — but only for unlinked
// instructions (a non-null parent means the value is real IR, not a clone).
298 void Simplifier::Context::record(Value *V) {
299 auto Record = [this](Instruction *U) -> bool {
307 void Simplifier::Context::use(Value *V) {
308 auto Use = [this](Instruction *U) -> bool {
316 void Simplifier::Context::unuse(Value *V) {
317 if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != nullptr)
320 auto Unuse = [this](Instruction *U) -> bool {
// Replace every occurrence of OldV with NewV inside the clone tree Tree
// and return the (possibly new) root. Only unlinked clones are rewritten.
330 Value *Simplifier::Context::subst(Value *Tree, Value *OldV, Value *NewV) {
// Worklist walk over the tree (setup lines elided).
339 Instruction *U = dyn_cast<Instruction>(Q.pop_front_val());
340 // If U is not an instruction, or it's not a clone, skip it.
341 if (!U || U->getParent())
343 for (unsigned i = 0, n = U->getNumOperands(); i != n; ++i) {
344 Value *Op = U->getOperand(i);
// Matching operand (comparison line elided): redirect it to NewV.
346 U->setOperand(i, NewV);
357 void Simplifier::Context::replace(Value *OldV, Value *NewV) {
364 // NewV may be a complex tree that has just been created by one of the
365 // transformation rules. We need to make sure that it is commoned with
366 // the existing Root to the maximum extent possible.
367 // Identify all subtrees of NewV (including NewV itself) that have
368 // equivalent counterparts in Root, and replace those subtrees with
369 // these counterparts.
// Worklist walk over NewV (setup lines elided).
373 Value *V = Q.pop_front_val();
374 Instruction *U = dyn_cast<Instruction>(V);
375 if (!U || U->getParent())
// An equivalent subtree already exists under Root: reuse it.
377 if (Value *DupV = find(Root, V)) {
379 NewV = subst(NewV, V, DupV);
381 for (Value *Op : U->operands())
386 // Now, simply replace OldV with NewV in Root.
387 Root = subst(Root, OldV, NewV);
// Destroy all remaining (never-materialized) clones. References are dropped
// first so the clones can then be deleted in any order without dangling
// uses (the deletion loop body is on elided lines).
392 void Simplifier::Context::cleanup() {
393 for (Value *V : Clones) {
394 Instruction *U = cast<Instruction>(V);
396 U->dropAllReferences();
399 for (Value *V : Clones) {
400 Instruction *U = cast<Instruction>(V);
// Structural equality of two expression trees: same operation, and operands
// pairwise equal — recursing into instruction operands, identity-comparing
// leaves. Instructions already linked into a block are compared with
// isIdenticalTo (i.e. by exact identity of operands).
407 bool Simplifier::Context::equal(const Instruction *I,
408 const Instruction *J) const {
411 if (!I->isSameOperationAs(J))
// Both linked into real IR (guard line elided): exact comparison.
414 return I->isIdenticalTo(J);
416 for (unsigned i = 0, n = I->getNumOperands(); i != n; ++i) {
417 Value *OpI = I->getOperand(i), *OpJ = J->getOperand(i);
420 auto *InI = dyn_cast<const Instruction>(OpI);
421 auto *InJ = dyn_cast<const Instruction>(OpJ);
// Both instruction operands (guard elided): recurse.
423 if (!equal(InI, InJ))
// Mixed or leaf operands must be the same value.
425 } else if (InI != InJ || !InI)
// Search the clone tree Tree for a subtree structurally equal to Sub
// (returning it), or null if none exists (return lines elided).
432 Value *Simplifier::Context::find(Value *Tree, Value *Sub) const {
433 Instruction *SubI = dyn_cast<Instruction>(Sub);
// Worklist walk (setup lines elided).
438 Value *V = Q.pop_front_val();
441 Instruction *U = dyn_cast<Instruction>(V);
442 if (!U || U->getParent())
444 if (SubI && equal(SubI, U))
// PHIs are never cloned (see initialize), so they cannot appear here.
446 assert(!isa<PHINode>(U));
447 for (Value *Op : U->operands())
// Insert the clone tree rooted at I into block B before At, operands first
// (post-order), so every instruction is dominated by its operands.
// NOTE(review): the guard that skips already-linked instructions is on an
// elided line; the recursion below presumably links only clone operands.
454 void Simplifier::Context::link(Instruction *I, BasicBlock *B,
455 BasicBlock::iterator At) {
459 for (Value *Op : I->operands()) {
460 if (Instruction *OpI = dyn_cast<Instruction>(Op))
464 B->getInstList().insert(At, I);
// Emit the simplified expression into real IR: link the tree rooted at
// Root into block B before At, and return Root.
468 Value *Simplifier::Context::materialize(BasicBlock *B,
469 BasicBlock::iterator At) {
470 if (Instruction *RootI = dyn_cast<Instruction>(Root))
// Run all rewrite Rules over the clone tree to a fixed point, using a
// worklist seeded from C.Root (setup lines elided). Iteration is capped at
// Limit to guard against non-terminating rule sets.
476 Value *Simplifier::simplify(Context &C) {
480 const unsigned Limit = 100000;
483 if (Count++ >= Limit)
485 Instruction *U = dyn_cast<Instruction>(Q.pop_front_val());
// Only unlinked clones that are still reachable from Root are rewritten.
486 if (!U || U->getParent() || !C.Used.count(U))
488 bool Changed = false;
489 for (Rule &R : Rules) {
// A rule returns a replacement value, or null when it does not apply
// (handling of W is on elided lines).
490 Value *W = R(U, C.Ctx);
// No rule fired: descend into the operands instead.
500 for (Value *Op : U->operands())
504 assert(Count < Limit && "Infinite loop in HLIR/simplify?");
509 //===----------------------------------------------------------------------===//
511 // Implementation of PolynomialMultiplyRecognize
513 //===----------------------------------------------------------------------===//
// Recognizes loops computing a polynomial (carry-less, GF(2)) multiplication
// bit-by-bit and replaces them with the Hexagon pmpy operation.
// NOTE(review): access specifiers, some members, and closing braces are
// elided in this listing.
516 class PolynomialMultiplyRecognize {
518 explicit PolynomialMultiplyRecognize(Loop *loop, const DataLayout &dl,
519 const DominatorTree &dt, const TargetLibraryInfo &tli,
521 : CurLoop(loop), DL(dl), DT(dt), TLI(tli), SE(se) {}
525 typedef SetVector<Value*> ValueSeq;
// The integer type pmpy operates on: i32 in the function's context.
527 IntegerType *getPmpyType() const {
528 LLVMContext &Ctx = CurLoop->getHeader()->getParent()->getContext();
529 return IntegerType::get(Ctx, 32);
// Type-promotion helpers (widen narrow loop computations to the pmpy type).
531 bool isPromotableTo(Value *V, IntegerType *Ty);
532 void promoteTo(Instruction *In, IntegerType *DestTy, BasicBlock *LoopB);
533 bool promoteTypes(BasicBlock *LoopB, BasicBlock *ExitB);
// Shift-direction conversion helpers (turn right-shift idioms into the
// canonical left-shift form).
535 Value *getCountIV(BasicBlock *BB);
536 bool findCycle(Value *Out, Value *In, ValueSeq &Cycle);
537 void classifyCycle(Instruction *DivI, ValueSeq &Cycle, ValueSeq &Early,
539 bool classifyInst(Instruction *UseI, ValueSeq &Early, ValueSeq &Late);
540 bool commutesWithShift(Instruction *I);
541 bool highBitsAreZero(Value *V, unsigned IterCount);
542 bool keepsHighBitsZero(Value *V, unsigned IterCount);
543 bool isOperandShifted(Instruction *I, Value *Op);
544 bool convertShiftsToLeft(BasicBlock *LoopB, BasicBlock *ExitB,
546 void cleanupLoopBody(BasicBlock *LoopB);
// Values extracted from a matched pmpy idiom (see matchLeftShift /
// matchRightShift for the meaning of P, Q, R, X).
548 struct ParsedValues {
549 ParsedValues() : M(nullptr), P(nullptr), Q(nullptr), R(nullptr),
550 X(nullptr), Res(nullptr), IterCount(0), Left(false), Inv(false) {}
551 Value *M, *P, *Q, *R, *X;
557 bool matchLeftShift(SelectInst *SelI, Value *CIV, ParsedValues &PV);
558 bool matchRightShift(SelectInst *SelI, ParsedValues &PV);
559 bool scanSelect(SelectInst *SI, BasicBlock *LoopB, BasicBlock *PrehB,
560 Value *CIV, ParsedValues &PV, bool PreScan);
561 unsigned getInverseMxN(unsigned QP);
562 Value *generate(BasicBlock::iterator At, ParsedValues &PV);
564 void setupSimplifier();
568 const DataLayout &DL;
569 const DominatorTree &DT;
570 const TargetLibraryInfo &TLI;
// Find the loop's counting induction variable in BB: a PHI that starts at
// 0 on loop entry and is incremented by the constant 1 on the back edge.
// Returns the PHI, or null if none matches (failure returns are on elided
// lines).
576 Value *PolynomialMultiplyRecognize::getCountIV(BasicBlock *BB) {
577 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
// Expect exactly two predecessors: the preheader and the latch (BB itself).
578 if (std::distance(PI, PE) != 2)
579 return nullptr;
580 BasicBlock *PB = (*PI == BB) ? *std::next(PI) : *PI;
582 for (auto I = BB->begin(), E = BB->end(); I != E && isa<PHINode>(I); ++I) {
583 auto *PN = cast<PHINode>(I);
// Entry value must be the constant 0.
584 Value *InitV = PN->getIncomingValueForBlock(PB);
585 if (!isa<ConstantInt>(InitV) || !cast<ConstantInt>(InitV)->isZero())
// Back-edge value must be an add of the PHI itself.
587 Value *IterV = PN->getIncomingValueForBlock(BB);
588 if (!isa<BinaryOperator>(IterV))
590 auto *BO = dyn_cast<BinaryOperator>(IterV);
591 if (BO->getOpcode() != Instruction::Add)
593 Value *IncV = nullptr;
594 if (BO->getOperand(0) == PN)
595 IncV = BO->getOperand(1);
596 else if (BO->getOperand(1) == PN)
597 IncV = BO->getOperand(0);
// Increment must be exactly 1 (the "return PN" is on an elided line).
601 if (auto *T = dyn_cast<ConstantInt>(IncV))
602 if (T->getZExtValue() == 1)
// Replace all uses of I with J, but only in instructions that live in BB.
// NOTE(review): the iterator advance (++UI) appears to be on an elided
// line; the iterator is deliberately advanced before replacement so
// rewriting a use does not invalidate it.
609 static void replaceAllUsesOfWithIn(Value *I, Value *J, BasicBlock *BB) {
610 for (auto UI = I->user_begin(), UE = I->user_end(); UI != UE;) {
611 Use &TheUse = UI.getUse();
613 if (auto *II = dyn_cast<Instruction>(TheUse.getUser()))
614 if (BB == II->getParent())
615 II->replaceUsesOfWith(I, J);
// Match the left-shifting pmpy select idiom. On success, fills PV with the
// recognized values (R, Q, X, ...). Failure "return false" statements are
// on elided lines throughout.
620 bool PolynomialMultiplyRecognize::matchLeftShift(SelectInst *SelI,
621 Value *CIV, ParsedValues &PV) {
622 // Match the following:
623 // select (X & (1 << i)) != 0 ? R ^ (Q << i) : R
624 // select (X & (1 << i)) == 0 ? R : R ^ (Q << i)
625 // The condition may also check for equality with the masked value, i.e
626 // select (X & (1 << i)) == (1 << i) ? R ^ (Q << i) : R
627 // select (X & (1 << i)) != (1 << i) ? R : R ^ (Q << i);
629 Value *CondV = SelI->getCondition();
630 Value *TrueV = SelI->getTrueValue();
631 Value *FalseV = SelI->getFalseValue();
633 using namespace PatternMatch;
635 CmpInst::Predicate P;
636 Value *A = nullptr, *B = nullptr, *C = nullptr;
// Condition must be an eq/ne compare of an 'and' against some value C.
638 if (!match(CondV, m_ICmp(P, m_And(m_Value(A), m_Value(B)), m_Value(C))) &&
639 !match(CondV, m_ICmp(P, m_Value(C), m_And(m_Value(A), m_Value(B)))))
641 if (P != CmpInst::ICMP_EQ && P != CmpInst::ICMP_NE)
643 // Matched: select (A & B) == C ? ... : ...
644 // select (A & B) != C ? ... : ...
646 Value *X = nullptr, *Sh1 = nullptr;
647 // Check (A & B) for (X & (1 << i)):
648 if (match(A, m_Shl(m_One(), m_Specific(CIV)))) {
651 } else if (match(B, m_Shl(m_One(), m_Specific(CIV)))) {
655 // TODO: Could also check for an induction variable containing single
656 // bit shifted left by 1 in each iteration.
662 // Check C against the possible values for comparison: 0 and (1 << i):
663 if (match(C, m_Zero()))
664 TrueIfZero = (P == CmpInst::ICMP_EQ);
// C == (1 << i) case (the comparison of C with Sh1 is on elided lines).
666 TrueIfZero = (P == CmpInst::ICMP_NE);
671 // select (X & (1 << i)) ? ... : ...
672 // including variations of the check against zero/non-zero value.
// One arm keeps R unchanged ("same"), the other xors in (Q << i).
674 Value *ShouldSameV = nullptr, *ShouldXoredV = nullptr;
677 ShouldXoredV = FalseV;
679 ShouldSameV = FalseV;
680 ShouldXoredV = TrueV;
683 Value *Q = nullptr, *R = nullptr, *Y = nullptr, *Z = nullptr;
685 if (match(ShouldXoredV, m_Xor(m_Value(Y), m_Value(Z)))) {
686 // Matched: select +++ ? ... : Y ^ Z
687 // select +++ ? Y ^ Z : ...
688 // where +++ denotes previously checked matches.
689 if (ShouldSameV == Y)
691 else if (ShouldSameV == Z)
696 // Matched: select +++ ? R : R ^ T
697 // select +++ ? R ^ T : R
698 // depending on TrueIfZero.
700 } else if (match(ShouldSameV, m_Zero())) {
701 // Matched: select +++ ? 0 : ...
702 // select +++ ? ... : 0
703 if (!SelI->hasOneUse())
706 // Matched: select +++ ? 0 : T
707 // select +++ ? T : 0
// The single user must be an xor of the select with R.
709 Value *U = *SelI->user_begin();
710 if (!match(U, m_Xor(m_Specific(SelI), m_Value(R))) &&
711 !match(U, m_Xor(m_Value(R), m_Specific(SelI))))
713 // Matched: xor (select +++ ? 0 : T), R
714 // xor (select +++ ? T : 0), R
718 // The xor input value T is isolated into its own match so that it could
719 // be checked against an induction variable containing a shifted bit
721 // For now, check against (Q << i).
722 if (!match(T, m_Shl(m_Value(Q), m_Specific(CIV))) &&
723 !match(T, m_Shl(m_ZExt(m_Value(Q)), m_ZExt(m_Specific(CIV)))))
725 // Matched: select +++ ? R : R ^ (Q << i)
726 // select +++ ? R ^ (Q << i) : R
// Match the right-shifting pmpy select idiom. On success, fills PV.
// Failure "return false" statements are on elided lines throughout.
736 bool PolynomialMultiplyRecognize::matchRightShift(SelectInst *SelI,
738 // Match the following:
739 // select (X & 1) != 0 ? (R >> 1) ^ Q : (R >> 1)
740 // select (X & 1) == 0 ? (R >> 1) : (R >> 1) ^ Q
741 // The condition may also check for equality with the masked value, i.e
742 // select (X & 1) == 1 ? (R >> 1) ^ Q : (R >> 1)
743 // select (X & 1) != 1 ? (R >> 1) : (R >> 1) ^ Q
745 Value *CondV = SelI->getCondition();
746 Value *TrueV = SelI->getTrueValue();
747 Value *FalseV = SelI->getFalseValue();
749 using namespace PatternMatch;
752 CmpInst::Predicate P;
// Condition compares some value C against 0 or against 1.
755 if (match(CondV, m_ICmp(P, m_Value(C), m_Zero())) ||
756 match(CondV, m_ICmp(P, m_Zero(), m_Value(C)))) {
757 if (P != CmpInst::ICMP_EQ && P != CmpInst::ICMP_NE)
759 // Matched: select C == 0 ? ... : ...
760 // select C != 0 ? ... : ...
761 TrueIfZero = (P == CmpInst::ICMP_EQ);
762 } else if (match(CondV, m_ICmp(P, m_Value(C), m_One())) ||
763 match(CondV, m_ICmp(P, m_One(), m_Value(C)))) {
764 if (P != CmpInst::ICMP_EQ && P != CmpInst::ICMP_NE)
766 // Matched: select C == 1 ? ... : ...
767 // select C != 1 ? ... : ...
768 TrueIfZero = (P == CmpInst::ICMP_NE);
// C itself must be the single-bit test (X & 1).
773 if (!match(C, m_And(m_Value(X), m_One())) &&
774 !match(C, m_And(m_One(), m_Value(X))))
776 // Matched: select (X & 1) == +++ ? ... : ...
777 // select (X & 1) != +++ ? ... : ...
779 Value *R = nullptr, *Q = nullptr;
// Branch on TrueIfZero (the "if" line is elided):
781 // The select's condition is true if the tested bit is 0.
782 // TrueV must be the shift, FalseV must be the xor.
783 if (!match(TrueV, m_LShr(m_Value(R), m_One())))
785 // Matched: select +++ ? (R >> 1) : ...
786 if (!match(FalseV, m_Xor(m_Specific(TrueV), m_Value(Q))) &&
787 !match(FalseV, m_Xor(m_Value(Q), m_Specific(TrueV))))
789 // Matched: select +++ ? (R >> 1) : (R >> 1) ^ Q
792 // The select's condition is true if the tested bit is 1.
793 // TrueV must be the xor, FalseV must be the shift.
794 if (!match(FalseV, m_LShr(m_Value(R), m_One())))
796 // Matched: select +++ ? ... : (R >> 1)
797 if (!match(TrueV, m_Xor(m_Specific(FalseV), m_Value(Q))) &&
798 !match(TrueV, m_Xor(m_Value(Q), m_Specific(FalseV))))
800 // Matched: select +++ ? (R >> 1) ^ Q : (R >> 1)
// Top-level matcher: try the left- and right-shift pmpy idioms on SelI and
// classify the match as direct pmpy or inverse pmpy, filling PV. PV field
// assignments and failure returns are on elided lines throughout.
812 bool PolynomialMultiplyRecognize::scanSelect(SelectInst *SelI,
813 BasicBlock *LoopB, BasicBlock *PrehB, Value *CIV, ParsedValues &PV,
815 using namespace PatternMatch;
816 // The basic pattern for R = P.Q is:
819 // if (P & (1 << i)) ; test-bit(P, i)
822 // Similarly, the basic pattern for R = (P/Q).Q - P
828 // There exist idioms, where instead of Q being shifted left, P is shifted
829 // right. This produces a result that is shifted right by 32 bits (the
830 // non-shifted result is 64-bit).
832 // For R = P.Q, this would be:
836 // R' = (R >> 1) ^ Q ; R is cycled through the loop, so it must
837 // else ; be shifted by 1, not i.
840 // And for the inverse:
848 // The left-shifting idioms share the same pattern:
849 // select (X & (1 << i)) ? R ^ (Q << i) : R
850 // Similarly for right-shifting idioms:
851 // select (X & 1) ? (R >> 1) ^ Q
853 if (matchLeftShift(SelI, CIV, PV)) {
854 // If this is a pre-scan, getting this far is sufficient.
858 // Need to make sure that the SelI goes back into R.
859 auto *RPhi = dyn_cast<PHINode>(PV.R);
862 if (SelI != RPhi->getIncomingValueForBlock(LoopB))
866 // If X is loop invariant, it must be the input polynomial, and the
867 // idiom is the basic polynomial multiply.
868 if (CurLoop->isLoopInvariant(PV.X)) {
872 // X is not loop invariant. If X == R, this is the inverse pmpy.
873 // Otherwise, check for an xor with an invariant value. If the
874 // variable argument to the xor is R, then this is still a valid
// (comment continues on an elided line.)
878 Value *Var = nullptr, *Inv = nullptr, *X1 = nullptr, *X2 = nullptr;
879 if (!match(PV.X, m_Xor(m_Value(X1), m_Value(X2))))
// Exactly one xor operand must be an instruction inside the loop (Var);
// the other is the invariant (Inv).
881 auto *I1 = dyn_cast<Instruction>(X1);
882 auto *I2 = dyn_cast<Instruction>(X2);
883 if (!I1 || I1->getParent() != LoopB) {
886 } else if (!I2 || I2->getParent() != LoopB) {
895 // The input polynomial P still needs to be determined. It will be
896 // the entry value of R.
897 Value *EntryP = RPhi->getIncomingValueForBlock(PrehB);
904 if (matchRightShift(SelI, PV)) {
905 // If this is an inverse pattern, the Q polynomial must be known at
// compile time (comment continues on an elided line).
907 if (PV.Inv && !isa<ConstantInt>(PV.Q))
911 // There is no exact matching of right-shift pmpy.
// Return true if Val can be widened to DestTy without changing the result
// of the computation (the default "return false" for unlisted opcodes is
// on elided lines).
919 bool PolynomialMultiplyRecognize::isPromotableTo(Value *Val,
920 IntegerType *DestTy) {
921 IntegerType *T = dyn_cast<IntegerType>(Val->getType());
// Only integer values no wider than the destination are candidates.
922 if (!T || T->getBitWidth() > DestTy->getBitWidth())
// Already the destination width: nothing to do, trivially promotable.
924 if (T->getBitWidth() == DestTy->getBitWidth())
926 // Non-instructions are promotable. The reason why an instruction may not
927 // be promotable is that it may produce a different result if its operands
928 // and the result are promoted, for example, it may produce more non-zero
929 // bits. While it would still be possible to represent the proper result
930 // in a wider type, it may require adding additional instructions (which
931 // we don't want to do).
932 Instruction *In = dyn_cast<Instruction>(Val);
935 // The bitwidth of the source type is smaller than the destination.
936 // Check if the individual operation can be promoted.
// Bitwise ops and logical-shift-right never create new high bits, so
// zero-extending operands preserves their result.
937 switch (In->getOpcode()) {
938 case Instruction::PHI:
939 case Instruction::ZExt:
940 case Instruction::And:
941 case Instruction::Or:
942 case Instruction::Xor:
943 case Instruction::LShr: // Shift right is ok.
944 case Instruction::Select:
// Compares are promotable only for equality/unsigned predicates (signed
// compares change meaning when operands are zero-extended).
946 case Instruction::ICmp:
// NOTE(review): cast<> asserts on failure and never yields null, so this
// condition is always true and the llvm_unreachable below only silences
// "missing return" warnings — dyn_cast would be the clearer idiom.
947 if (CmpInst *CI = cast<CmpInst>(In))
948 return CI->isEquality() || CI->isUnsigned();
949 llvm_unreachable("Cast failed unexpectedly");
// Add is safe only when it provably cannot wrap.
950 case Instruction::Add:
951 return In->hasNoSignedWrap() && In->hasNoUnsignedWrap();
// Widen instruction In to DestTy in place (mutateType), then patch up the
// cases where that alone is not enough: PHI incoming values, zext chains
// that became no-ops, and constant operands.
957 void PolynomialMultiplyRecognize::promoteTo(Instruction *In,
958 IntegerType *DestTy, BasicBlock *LoopB) {
959 // Leave boolean values alone.
960 if (!In->getType()->isIntegerTy(1))
961 In->mutateType(DestTy);
962 unsigned DestBW = DestTy->getBitWidth();
// PHI: zero-extend any incoming value whose type no longer matches the
// (now promoted) PHI type, emitting the zext in the predecessor block.
965 if (PHINode *P = dyn_cast<PHINode>(In)) {
966 unsigned N = P->getNumIncomingValues();
967 for (unsigned i = 0; i != N; ++i) {
968 BasicBlock *InB = P->getIncomingBlock(i);
971 Value *InV = P->getIncomingValue(i);
972 IntegerType *Ty = cast<IntegerType>(InV->getType());
973 // Do not promote values in PHI nodes of type i1.
974 if (Ty != P->getType()) {
975 // If the value type does not match the PHI type, the PHI type
976 // must have been promoted.
977 assert(Ty->getBitWidth() < DestBW);
978 InV = IRBuilder<>(InB->getTerminator()).CreateZExt(InV, DestTy);
979 P->setIncomingValue(i, InV);
// ZExt whose source and destination became the same type is now a no-op:
// forward its operand and delete it.
982 } else if (ZExtInst *Z = dyn_cast<ZExtInst>(In)) {
983 Value *Op = Z->getOperand(0);
984 if (Op->getType() == Z->getType())
985 Z->replaceAllUsesWith(Op);
986 Z->eraseFromParent();
990 // Promote immediates.
991 for (unsigned i = 0, n = In->getNumOperands(); i != n; ++i) {
992 if (ConstantInt *CI = dyn_cast<ConstantInt>(In->getOperand(i)))
993 if (CI->getType()->getBitWidth() < DestBW)
994 In->setOperand(i, ConstantInt::get(DestTy, CI->getZExtValue()));
// Promote every computation in the loop body to the pmpy type (i32),
// returning false if any value cannot be safely widened. Failure returns
// and some closing braces are on elided lines.
999 bool PolynomialMultiplyRecognize::promoteTypes(BasicBlock *LoopB,
1000 BasicBlock *ExitB) {
1002 // Skip loops where the exit block has more than one predecessor. The values
1003 // coming from the loop block will be promoted to another type, and so the
1004 // values coming into the exit block from other predecessors would also have
// to be promoted (comment continues on an elided line).
1006 if (!ExitB || (ExitB->getSinglePredecessor() != LoopB))
1008 IntegerType *DestTy = getPmpyType();
1009 // Check if the exit values have types that are no wider than the type
1010 // that we want to promote to.
1011 unsigned DestBW = DestTy->getBitWidth();
1012 for (Instruction &In : *ExitB) {
1013 PHINode *P = dyn_cast<PHINode>(&In);
1016 if (P->getNumIncomingValues() != 1)
1018 assert(P->getIncomingBlock(0) == LoopB);
1019 IntegerType *T = dyn_cast<IntegerType>(P->getType());
1020 if (!T || T->getBitWidth() > DestBW)
1024 // Check all instructions in the loop.
1025 for (Instruction &In : *LoopB)
1026 if (!In.isTerminator() && !isPromotableTo(&In, DestTy))
1029 // Perform the promotion.
// Snapshot the instruction list first: promoteTo may erase instructions
// (no-op zexts), which would invalidate direct iteration over LoopB.
1030 std::vector<Instruction*> LoopIns;
1031 std::transform(LoopB->begin(), LoopB->end(), std::back_inserter(LoopIns),
1032 [](Instruction &In) { return &In; });
1033 for (Instruction *In : LoopIns)
1034 promoteTo(In, DestTy, LoopB);
1036 // Fix up the PHI nodes in the exit block.
1037 Instruction *EndI = ExitB->getFirstNonPHI();
1038 BasicBlock::iterator End = EndI ? EndI->getIterator() : ExitB->end();
1039 for (auto I = ExitB->begin(); I != End; ++I) {
1040 PHINode *P = dyn_cast<PHINode>(I);
1043 Type *Ty0 = P->getIncomingValue(0)->getType();
1044 Type *PTy = P->getType();
1046 assert(Ty0 == DestTy);
1047 // In order to create the trunc, P must have the promoted type.
1049 Value *T = IRBuilder<>(ExitB, End).CreateTrunc(P, PTy);
1050 // In order for the RAUW to work, the types of P and T must match.
1052 P->replaceAllUsesWith(T);
1053 // Final update of the P's type.
// Re-point the trunc at P after the RAUW redirected it (RAUW also rewrote
// the trunc's own operand).
1055 cast<Instruction>(T)->setOperand(0, P);
// Depth-first search for a def-use cycle within Out's block that leads from
// Out back to In, collecting the visited values in Cycle. Returns true when
// a cycle was found (Cycle push/pop bookkeeping is on elided lines).
1063 bool PolynomialMultiplyRecognize::findCycle(Value *Out, Value *In,
1065 // Out = ..., In, ...
1069 auto *BB = cast<Instruction>(Out)->getParent();
1070 bool HadPhi = false;
1072 for (auto U : Out->users()) {
// Only follow users that are instructions in the same block.
1073 auto *I = dyn_cast<Instruction>(&*U);
1074 if (I == nullptr || I->getParent() != BB)
1076 // Make sure that there are no multi-iteration cycles, e.g.
1079 // The cycle p1->p2->p1 would span two loop iterations.
1080 // Check that there is only one phi in the cycle.
1081 bool IsPhi = isa<PHINode>(I);
1082 if (IsPhi && HadPhi)
1088 if (findCycle(I, In, Cycle))
1092 return !Cycle.empty();
1096 void PolynomialMultiplyRecognize::classifyCycle(Instruction *DivI,
1097 ValueSeq &Cycle, ValueSeq &Early, ValueSeq &Late) {
1098 // All the values in the cycle that are between the phi node and the
1099 // divider instruction will be classified as "early", all other values
// as "late" (comment continues on an elided line).
// Scan the cycle to find whichever of {DivI, the phi} occurs first;
// the IsE flag records which one it was (its assignments are elided).
1103 unsigned I, N = Cycle.size();
1104 for (I = 0; I < N; ++I) {
1105 Value *V = Cycle[I];
1108 else if (!isa<PHINode>(V))
1110 // Stop if found either.
1113 // "I" is the index of either DivI or the phi node, whichever was first.
1114 // "E" is "false" or "true" respectively.
// Everything before the first marker goes into one class...
1115 ValueSeq &First = !IsE ? Early : Late;
1116 for (unsigned J = 0; J < I; ++J)
1117 First.insert(Cycle[J]);
// ...the marker itself and what follows (until the second marker) into
// the other class; the remainder after the second marker back into First.
1119 ValueSeq &Second = IsE ? Early : Late;
1120 Second.insert(Cycle[I]);
1121 for (++I; I < N; ++I) {
1122 Value *V = Cycle[I];
1123 if (DivI == V || isa<PHINode>(V))
1129 First.insert(Cycle[I]);
// Classify UseI into the Early or Late set based on its operands' classes.
// Returns false when the operands are an irreconcilable mixture. The
// insertions into Early/Late and several returns are on elided lines.
1133 bool PolynomialMultiplyRecognize::classifyInst(Instruction *UseI,
1134 ValueSeq &Early, ValueSeq &Late) {
1135 // Select is an exception, since the condition value does not have to be
1136 // classified in the same way as the true/false values. The true/false
1137 // values do have to be both early or both late.
1138 if (UseI->getOpcode() == Instruction::Select) {
1139 Value *TV = UseI->getOperand(1), *FV = UseI->getOperand(2);
1140 if (Early.count(TV) || Early.count(FV)) {
1141 if (Late.count(TV) || Late.count(FV))
1144 } else if (Late.count(TV) || Late.count(FV)) {
1145 if (Early.count(TV) || Early.count(FV))
1152 // Not sure what would be the example of this, but the code below relies
1153 // on having at least one operand.
1154 if (UseI->getNumOperands() == 0)
// AE/AL: "all operands early" / "all operands late" (falsified below as
// counter-examples are seen; the clearing assignments are elided).
1157 bool AE = true, AL = true;
1158 for (auto &I : UseI->operands()) {
1159 if (Early.count(&*I))
1161 else if (Late.count(&*I))
1164 // If the operands appear "all early" and "all late" at the same time,
1165 // then it means that none of them are actually classified as either.
1166 // This is harmless.
1169 // Conversely, if they are neither "all early" nor "all late", then
1170 // we have a mixture of early and late operands that is not a known
// exception (comment continues on an elided line).
1175 // Check that we have covered the two special cases.
// Whitelist of opcodes whose result is unchanged (modulo the shift) when
// all operands are pre-shifted left — i.e. ops that commute with shl.
// The return statements and default case are on elided lines.
1186 bool PolynomialMultiplyRecognize::commutesWithShift(Instruction *I) {
1187 switch (I->getOpcode()) {
1188 case Instruction::And:
1189 case Instruction::Or:
1190 case Instruction::Xor:
1191 case Instruction::LShr:
1192 case Instruction::Shl:
1193 case Instruction::Select:
1194 case Instruction::ICmp:
1195 case Instruction::PHI:
// Return true if the top IterCount bits of V are known to be zero,
// i.e. there is headroom to shift V left by up to IterCount bits.
1204 bool PolynomialMultiplyRecognize::highBitsAreZero(Value *V,
1205 unsigned IterCount) {
1206 auto *T = dyn_cast<IntegerType>(V->getType());
// Non-integer values fail (the early return is on an elided line).
1210 KnownBits Known(T->getBitWidth());
1211 computeKnownBits(V, Known, DL);
1212 return Known.countMinLeadingZeros() >= IterCount;
1216 bool PolynomialMultiplyRecognize::keepsHighBitsZero(Value *V,
1217 unsigned IterCount) {
1218 // Assume that all inputs to the value have the high bits zero.
1219 // Check if the value itself preserves the zeros in the high bits.
// Constants qualify if their own top IterCount bits are zero.
1220 if (auto *C = dyn_cast<ConstantInt>(V))
1221 return C->getValue().countLeadingZeros() >= IterCount;
1223 if (auto *I = dyn_cast<Instruction>(V)) {
// These ops cannot introduce new high bits when their inputs have none
// (the return statements and default case are on elided lines).
1224 switch (I->getOpcode()) {
1225 case Instruction::And:
1226 case Instruction::Or:
1227 case Instruction::Xor:
1228 case Instruction::LShr:
1229 case Instruction::Select:
1230 case Instruction::ICmp:
1231 case Instruction::PHI:
1232 case Instruction::ZExt:
// Return whether operand Op of I carries shifted data: for shl/lshr the
// shift amount (operand 1) is not shifted, everything else is. The final
// "return true" for other opcodes is on an elided line.
1241 bool PolynomialMultiplyRecognize::isOperandShifted(Instruction *I, Value *Op) {
1242 unsigned Opc = I->getOpcode();
1243 if (Opc == Instruction::Shl || Opc == Instruction::LShr)
1244 return Op != I->getOperand(1);
// Rewrite the loop so that values shifted right by one bit on each
// iteration are instead kept "unshifted" inside the loop (with operands
// pre-shifted left by the count IV), and compensated with a single lshr
// after the loop. This makes the computation expressible as a polynomial
// multiply. LoopB is the single loop block, ExitB the unique exit block,
// IterCount the compile-time iteration count. Early-exit paths (elided in
// this listing) bail out when a precondition fails.
1249 bool PolynomialMultiplyRecognize::convertShiftsToLeft(BasicBlock *LoopB,
1250 BasicBlock *ExitB, unsigned IterCount) {
1251 Value *CIV = getCountIV(LoopB);
// The count IV must be of integer type: it is used below as a shift amount.
1254 auto *CIVTy = dyn_cast<IntegerType>(CIV->getType());
1255 if (CIVTy == nullptr)
1259 ValueSeq Early, Late, Cycled;
1261 // Find all value cycles that contain logical right shifts by 1.
1262 for (Instruction &I : *LoopB) {
1263 using namespace PatternMatch;
1265 if (!match(&I, m_LShr(m_Value(V), m_One())))
1268 if (!findCycle(&I, V, C))
// Partition cycle members into "early" (pre-shift within an iteration)
// and "late" (post-shift) values.
1273 classifyCycle(&I, C, Early, Late);
1274 Cycled.insert(C.begin(), C.end());
1278 // Find the set of all values affected by the shift cycles, i.e. all
1279 // cycled values, and (recursively) all their users.
1280 ValueSeq Users(Cycled.begin(), Cycled.end());
1281 for (unsigned i = 0; i < Users.size(); ++i) {
1282 Value *V = Users[i];
1283 if (!isa<IntegerType>(V->getType()))
1285 auto *R = cast<Instruction>(V);
1286 // If the instruction does not commute with shifts, the loop cannot
1288 if (!commutesWithShift(R))
1290 for (auto I = R->user_begin(), E = R->user_end(); I != E; ++I) {
1291 auto *T = cast<Instruction>(*I);
1292 // Skip users from outside of the loop. They will be handled later.
1293 // Also, skip the right-shifts and phi nodes, since they mix early
1295 if (T->getParent() != LoopB || RShifts.count(T) || isa<PHINode>(T))
// Every transitive in-loop user must classify uniformly as early or late.
1299 if (!classifyInst(T, Early, Late))
1304 if (Users.size() == 0)
1307 // Verify that high bits remain zero.
1308 ValueSeq Internal(Users.begin(), Users.end());
1310 for (unsigned i = 0; i < Internal.size(); ++i) {
1311 auto *R = dyn_cast<Instruction>(Internal[i]);
1314 for (Value *Op : R->operands()) {
1315 auto *T = dyn_cast<Instruction>(Op);
// Operands defined outside the loop are treated as loop inputs (elided
// branch); operands from inside join the internal set being verified.
1316 if (T && T->getParent() != LoopB)
1319 Internal.insert(Op)	;
// Inputs must have the top IterCount bits known zero; internal
// instructions must preserve that zero-ness.
1322 for (Value *V : Inputs)
1323 if (!highBitsAreZero(V, IterCount))
1325 for (Value *V : Internal)
1326 if (!keepsHighBitsZero(V, IterCount))
1329 // Finally, the work can be done. Unshift each user.
1330 IRBuilder<> IRB(LoopB);
1331 std::map<Value*,Value*> ShiftMap;
1332 typedef std::map<std::pair<Value*,Type*>,Value*> CastMapType;
1333 CastMapType CastMap;
// Helper: zero-extend V to Ty, memoizing casts so each (value, type)
// pair is materialized at most once.
1335 auto upcast = [] (CastMapType &CM, IRBuilder<> &IRB, Value *V,
1336 IntegerType *Ty) -> Value* {
1337 auto H = CM.find(std::make_pair(V, Ty));
1340 Value *CV = IRB.CreateIntCast(V, Ty, false);
1341 CM.insert(std::make_pair(std::make_pair(V, Ty), CV));
1345 for (auto I = LoopB->begin(), E = LoopB->end(); I != E; ++I) {
1346 if (isa<PHINode>(I) || !Users.count(&*I))
1348 using namespace PatternMatch;
// The lshr-by-1 instructions simply disappear: in-loop users see the
// unshifted source value.
1351 if (match(&*I, m_LShr(m_Value(V), m_One()))) {
1352 replaceAllUsesOfWithIn(&*I, V, LoopB);
1355 // For each non-cycled operand, replace it with the corresponding
1356 // value shifted left.
1357 for (auto &J : I->operands()) {
1358 Value *Op = J.get();
1359 if (!isOperandShifted(&*I, Op))
1361 if (Users.count(Op))
1363 // Skip shifting zeros.
1364 if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
1366 // Check if we have already generated a shift for this value.
1367 auto F = ShiftMap.find(Op)	;
1368 Value *W = (F != ShiftMap.end()) ? F->second : nullptr;
1370 IRB.SetInsertPoint(&*I);
1371 // First, the shift amount will be CIV or CIV+1, depending on
1372 // whether the value is early or late. Instead of creating CIV+1,
1373 // do a single shift of the value.
1374 Value *ShAmt = CIV, *ShVal = Op;
1375 auto *VTy = cast<IntegerType>(ShVal->getType());
1376 auto *ATy = cast<IntegerType>(ShAmt->getType());
1377 if (Late.count(&*I))
1378 ShVal = IRB.CreateShl(Op, ConstantInt::get(VTy, 1));
1379 // Second, the types of the shifted value and the shift amount
// must agree: widen whichever is narrower (zext is safe — the high
// bits were verified to be zero above).
1382 if (VTy->getBitWidth() < ATy->getBitWidth())
1383 ShVal = upcast(CastMap, IRB, ShVal, ATy);
1385 ShAmt = upcast(CastMap, IRB, ShAmt, VTy);
1387 // Ready to generate the shift and memoize it.
1388 W = IRB.CreateShl(ShVal, ShAmt);
1389 ShiftMap.insert(std::make_pair(Op, W));
1391 I->replaceUsesOfWith(Op, W);
1395 // Update the users outside of the loop to account for having left
1396 // shifts. They would normally be shifted right in the loop, so shift
1397 // them right after the loop exit.
1398 // Take advantage of the loop-closed SSA form, which has all the post-
1399 // loop values in phi nodes.
1400 IRB.SetInsertPoint(ExitB, ExitB->getFirstInsertionPt());
1401 for (auto P = ExitB->begin(), Q = ExitB->end(); P != Q; ++P) {
1402 if (!isa<PHINode>(P))
1404 auto *PN = cast<PHINode>(P);
1405 Value *U = PN->getIncomingValueForBlock(LoopB);
1406 if (!Users.count(U))
// Compensate for the IterCount left shifts accumulated inside the loop.
1408 Value *S = IRB.CreateLShr(PN, ConstantInt::get(PN->getType(), IterCount));
1409 PN->replaceAllUsesWith(S);
1410 // The above RAUW will create
1411 // S = lshr S, IterCount
1412 // so we need to fix it back into
1413 // S = lshr PN, IterCount
1414 cast<User>(S)->replaceUsesOfWith(S, PN);
// Post-transformation cleanup: simplify every instruction in the loop
// body, then delete instructions that have become trivially dead.
1421 void PolynomialMultiplyRecognize::cleanupLoopBody(BasicBlock *LoopB) {
1422 for (auto &I : *LoopB)
1423 if (Value *SV = SimplifyInstruction(&I, {DL, &TLI, &DT}))
1424 I.replaceAllUsesWith(SV);
// N caches the next iterator so that deleting *I does not invalidate the
// traversal (N is presumably advanced on the elided line 1427 — confirm).
1426 for (auto I = LoopB->begin(), N = I; I != LoopB->end(); I = N) {
1428 RecursivelyDeleteTriviallyDeadInstructions(&*I, &TLI);
// Compute the multiplicative inverse of QP interpreted as a polynomial
// over Z2 (i.e. an element of GF(2)[x]), modulo x^32. Bit i of QP is the
// coefficient of x^i. The caller must ensure the x^0 coefficient is 1
// (QP odd), otherwise no inverse exists.
1433 unsigned PolynomialMultiplyRecognize::getInverseMxN(unsigned QP) {
1434 // Arrays of coefficients of Q and the inverse, C.
1435 // Q[i] = coefficient at x^i.
1436 std::array<char,32> Q, C;
// Unpack the bits of QP into the coefficient array Q.
1438 for (unsigned i = 0; i < 32; ++i) {
1444 // Find C, such that
1445 // (Q[n]*x^n + ... + Q[1]*x + Q[0]) * (C[n]*x^n + ... + C[1]*x + C[0]) = 1
1447 // For it to have a solution, Q[0] must be 1. Since this is Z2[x], the
1448 // operations * and + are & and ^ respectively.
1450 // Find C[i] recursively, by comparing i-th coefficient in the product
1451 // with 0 (or 1 for i=0).
1453 // C[0] = 1, since C[0] = Q[0], and Q[0] = 1.
1455 for (unsigned i = 1; i < 32; ++i) {
1456 // Solve for C[i] in:
1457 // C[0]Q[i] ^ C[1]Q[i-1] ^ ... ^ C[i-1]Q[1] ^ C[i]Q[0] = 0
1458 // This is equivalent to
1459 // C[0]Q[i] ^ C[1]Q[i-1] ^ ... ^ C[i-1]Q[1] ^ C[i] = 0
1461 // C[0]Q[i] ^ C[1]Q[i-1] ^ ... ^ C[i-1]Q[1] = C[i]
1463 for (unsigned j = 0; j < i; ++j)
1464 T = T ^ (C[j] & Q[i-j]);
// Repack the coefficient array C into the returned integer.
1469 for (unsigned i = 0; i < 32; ++i)
// Emit straight-line IR computing the polynomial multiply described by
// PV, inserted before At, using the Hexagon M4_pmpyw intrinsic. Returns
// the resulting value. For the inverse form (guarded by an elided
// condition), P is first multiplied by Q^-1 mod x^IterCount so that the
// final pmpy with Q reconstructs the recognized result.
1477 Value *PolynomialMultiplyRecognize::generate(BasicBlock::iterator At,
1479 IRBuilder<> B(&*At);
1480 Module *M = At->getParent()->getParent()->getParent();
1481 Value *PMF = Intrinsic::getDeclaration(M, Intrinsic::hexagon_M4_pmpyw);
1483 Value *P = PV.P, *Q = PV.Q, *P0 = P;
1484 unsigned IC = PV.IterCount;
// Fold the optional additive term M into P up front (P0 keeps the xored
// value for the final correction below).
1486 if (PV.M != nullptr)
1487 P0 = P = B.CreateXor(P, PV.M);
1489 // Create a bit mask to clear the high bits beyond IterCount.
1490 auto *BMI = ConstantInt::get(P->getType(), APInt::getLowBitsSet(32, IC));
1492 if (PV.IterCount != 32)
1493 P = B.CreateAnd(P, BMI);
// Inverse variant: Q must be a constant so its inverse can be computed
// at compile time.
1496 auto *QI = dyn_cast<ConstantInt>(PV.Q);
1497 assert(QI && QI->getBitWidth() <= 32);
1499 // Again, clearing bits beyond IterCount.
1500 unsigned M = (1 << PV.IterCount) - 1;
1501 unsigned Tmp = (QI->getZExtValue() | 1) & M;
1502 unsigned QV = getInverseMxN(Tmp) & M;
1503 auto *QVI = ConstantInt::get(QI->getType(), QV);
1504 P = B.CreateCall(PMF, {P, QVI});
1505 P = B.CreateTrunc(P, QI->getType());
1507 P = B.CreateAnd(P, BMI);
// The main polynomial multiply of P by Q.
1510 Value *R = B.CreateCall(PMF, {P, Q});
// Undo the initial xor with M on the (widened) result.
1512 if (PV.M != nullptr)
1513 R = B.CreateXor(R, B.CreateIntCast(P0, R->getType(), false));
// Register the algebraic rewrite rules used by the pre-scan simplifier.
// Each rule is a lambda taking an instruction and returning either an
// equivalent replacement value or (on the elided paths) nullptr when the
// rule does not apply. The Simp.addRule(...) calls that install these
// lambdas are elided from this listing.
1519 void PolynomialMultiplyRecognize::setupSimplifier() {
1521 // Sink zext past bitwise operations.
1522 [](Instruction *I, LLVMContext &Ctx) -> Value* {
1523 if (I->getOpcode() != Instruction::ZExt)
1525 Instruction *T = dyn_cast<Instruction>(I->getOperand(0));
// Only and/or/xor commute with zext this way.
1528 switch (T->getOpcode()) {
1529 case Instruction::And:
1530 case Instruction::Or:
1531 case Instruction::Xor:
1537 return B.CreateBinOp(cast<BinaryOperator>(T)->getOpcode(),
1538 B.CreateZExt(T->getOperand(0), I->getType()),
1539 B.CreateZExt(T->getOperand(1), I->getType()));
1542 // (xor (and x a) (and y a)) -> (and (xor x y) a)
1543 [](Instruction *I, LLVMContext &Ctx) -> Value* {
1544 if (I->getOpcode() != Instruction::Xor)
1546 Instruction *And0 = dyn_cast<Instruction>(I->getOperand(0));
1547 Instruction *And1 = dyn_cast<Instruction>(I->getOperand(1));
1550 if (And0->getOpcode() != Instruction::And ||
1551 And1->getOpcode() != Instruction::And)
// Both ands must mask with the same value a.
1553 if (And0->getOperand(1) != And1->getOperand(1))
1556 return B.CreateAnd(B.CreateXor(And0->getOperand(0), And1->getOperand(0)),
1557 And0->getOperand(1));
1560 // (Op (select c x y) z) -> (select c (Op x z) (Op y z))
1561 // (Op x (select c y z)) -> (select c (Op x y) (Op x z))
1562 [](Instruction *I, LLVMContext &Ctx) -> Value* {
1563 BinaryOperator *BO = dyn_cast<BinaryOperator>(I);
1566 Instruction::BinaryOps Op = BO->getOpcode();
1567 if (SelectInst *Sel = dyn_cast<SelectInst>(BO->getOperand(0))) {
1569 Value *X = Sel->getTrueValue(), *Y = Sel->getFalseValue();
1570 Value *Z = BO->getOperand(1);
1571 return B.CreateSelect(Sel->getCondition(),
1572 B.CreateBinOp(Op, X, Z),
1573 B.CreateBinOp(Op, Y, Z));
1575 if (SelectInst *Sel = dyn_cast<SelectInst>(BO->getOperand(1))) {
1577 Value *X = BO->getOperand(0);
1578 Value *Y = Sel->getTrueValue(), *Z = Sel->getFalseValue();
1579 return B.CreateSelect(Sel->getCondition(),
1580 B.CreateBinOp(Op, X, Y),
1581 B.CreateBinOp(Op, X, Z));
1586 // (select c (select c x y) z) -> (select c x z)
1587 // (select c x (select c y z)) -> (select c x z)
1588 [](Instruction *I, LLVMContext &Ctx) -> Value* {
1589 SelectInst *Sel = dyn_cast<SelectInst>(I);
1593 Value *C = Sel->getCondition();
// Collapse nested selects that share the same condition.
1594 if (SelectInst *Sel0 = dyn_cast<SelectInst>(Sel->getTrueValue())) {
1595 if (Sel0->getCondition() == C)
1596 return B.CreateSelect(C, Sel0->getTrueValue(), Sel->getFalseValue());
1598 if (SelectInst *Sel1 = dyn_cast<SelectInst>(Sel->getFalseValue())) {
1599 if (Sel1->getCondition() == C)
1600 return B.CreateSelect(C, Sel->getTrueValue(), Sel1->getFalseValue());
1605 // (or (lshr x 1) 0x800.0) -> (xor (lshr x 1) 0x800.0)
1606 [](Instruction *I, LLVMContext &Ctx) -> Value* {
1607 if (I->getOpcode() != Instruction::Or)
1609 Instruction *LShr = dyn_cast<Instruction>(I->getOperand(0));
1610 if (!LShr || LShr->getOpcode() != Instruction::LShr)
1612 ConstantInt *One = dyn_cast<ConstantInt>(LShr->getOperand(1));
1613 if (!One || One->getZExtValue() != 1)
// After lshr-by-1 the sign bit is known zero, so or == xor with the MSB.
1615 ConstantInt *Msb = dyn_cast<ConstantInt>(I->getOperand(1));
1616 if (!Msb || Msb->getZExtValue() != Msb->getType()->getSignBit())
1618 return IRBuilder<>(Ctx).CreateXor(LShr, Msb);
1621 // (lshr (BitOp x y) c) -> (BitOp (lshr x c) (lshr y c))
1622 [](Instruction *I, LLVMContext &Ctx) -> Value* {
1623 if (I->getOpcode() != Instruction::LShr)
1625 BinaryOperator *BitOp = dyn_cast<BinaryOperator>(I->getOperand(0));
1628 switch (BitOp->getOpcode()) {
1629 case Instruction::And:
1630 case Instruction::Or:
1631 case Instruction::Xor:
1637 Value *S = I->getOperand(1);
1638 return B.CreateBinOp(BitOp->getOpcode(),
1639 B.CreateLShr(BitOp->getOperand(0), S),
1640 B.CreateLShr(BitOp->getOperand(1), S));
1643 // (BitOp1 (BitOp2 x a) b) -> (BitOp2 x (BitOp1 a b))
1644 [](Instruction *I, LLVMContext &Ctx) -> Value* {
1645 auto IsBitOp = [](unsigned Op) -> bool {
1647 case Instruction::And:
1648 case Instruction::Or:
1649 case Instruction::Xor:
1654 BinaryOperator *BitOp1 = dyn_cast<BinaryOperator>(I);
1655 if (!BitOp1 || !IsBitOp(BitOp1->getOpcode()))
1657 BinaryOperator *BitOp2 = dyn_cast<BinaryOperator>(BitOp1->getOperand(0));
1658 if (!BitOp2 || !IsBitOp(BitOp2->getOpcode()))
// Both a and b must be constants so the inner op folds at compile time.
1660 ConstantInt *CA = dyn_cast<ConstantInt>(BitOp2->getOperand(1));
1661 ConstantInt *CB = dyn_cast<ConstantInt>(BitOp1->getOperand(1));
1665 Value *X = BitOp2->getOperand(0);
1666 return B.CreateBinOp(BitOp2->getOpcode(), X,
1667 B.CreateBinOp(BitOp1->getOpcode(), CA, CB));
// Top-level driver: try to recognize the current loop as a polynomial
// (carry-less / CRC-style) multiplication and replace it with code built
// around the Hexagon pmpy intrinsic. Returns true if the loop body was
// transformed (the failure returns are on elided lines).
1672 bool PolynomialMultiplyRecognize::recognize() {
1673 DEBUG(dbgs() << "Starting PolynomialMultiplyRecognize on loop\n"
1674 << *CurLoop << '\n');
1676 // - The loop must consist of a single block.
1677 // - The iteration count must be known at compile-time.
1678 // - The loop must have an induction variable starting from 0, and
1679 // incremented in each iteration of the loop.
1680 BasicBlock *LoopB = CurLoop->getHeader();
1681 DEBUG(dbgs() << "Loop header:\n" << *LoopB);
// Single-block loop: the header must also be the latch.
1683 if (LoopB != CurLoop->getLoopLatch())
1685 BasicBlock *ExitB = CurLoop->getExitBlock();
1686 if (ExitB == nullptr)
1688 BasicBlock *EntryB = CurLoop->getLoopPreheader();
1689 if (EntryB == nullptr)
1692 unsigned IterCount = 0;
1693 const SCEV *CT = SE.getBackedgeTakenCount(CurLoop);
1694 if (isa<SCEVCouldNotCompute>(CT))
// Trip count = backedge-taken count + 1.
1696 if (auto *CV = dyn_cast<SCEVConstant>(CT))
1697 IterCount = CV->getValue()->getZExtValue() + 1;
1699 Value *CIV = getCountIV(LoopB);
1701 PV.IterCount = IterCount;
1702 DEBUG(dbgs() << "Loop IV: " << *CIV << "\nIterCount: " << IterCount << '\n');
1706 // Perform a preliminary scan of select instructions to see if any of them
1707 // looks like a generator of the polynomial multiply steps. Assume that a
1708 // loop can only contain a single transformable operation, so stop the
1709 // traversal after the first reasonable candidate was found.
1710 // XXX: Currently this approach can modify the loop before being 100% sure
1711 // that the transformation can be carried out.
1712 bool FoundPreScan = false;
1713 for (Instruction &In : *LoopB) {
1714 SelectInst *SI = dyn_cast<SelectInst>(&In);
// Try to simplify the select's context first; scan the simplified form.
1718 Simplifier::Context C(SI);
1719 Value *T = Simp.simplify(C);
1720 SelectInst *SelI = (T && isa<SelectInst>(T)) ? cast<SelectInst>(T) : SI;
1721 DEBUG(dbgs() << "scanSelect(pre-scan): " << PE(C, SelI) << '\n');
1722 if (scanSelect(SelI, LoopB, EntryB, CIV, PV, true)) {
1723 FoundPreScan = true;
// Materialize the simplified form in place of the original select.
1725 Value *NewSel = C.materialize(LoopB, SI->getIterator());
1726 SI->replaceAllUsesWith(NewSel);
1727 RecursivelyDeleteTriviallyDeadInstructions(SI, &TLI);
1733 if (!FoundPreScan) {
1734 DEBUG(dbgs() << "Have not found candidates for pmpy\n");
1739 // The right shift version actually only returns the higher bits of
1740 // the result (each iteration discards the LSB). If we want to convert it
1741 // to a left-shifting loop, the working data type must be at least as
1742 // wide as the target's pmpy instruction.
1743 if (!promoteTypes(LoopB, ExitB))
1745 convertShiftsToLeft(LoopB, ExitB, IterCount);
1746 cleanupLoopBody(LoopB);
1749 // Scan the loop again, find the generating select instruction.
1750 bool FoundScan = false;
1751 for (Instruction &In : *LoopB) {
1752 SelectInst *SelI = dyn_cast<SelectInst>(&In);
1755 DEBUG(dbgs() << "scanSelect: " << *SelI << '\n');
1756 FoundScan = scanSelect(SelI, LoopB, EntryB, CIV, PV, false);
// Debug dump of the recognized idiom (direct or inverse form).
1763 StringRef PP = (PV.M ? "(P+M)" : "P");
1765 dbgs() << "Found pmpy idiom: R = " << PP << ".Q\n";
1767 dbgs() << "Found inverse pmpy idiom: R = (" << PP << "/Q).Q) + "
1769 dbgs() << " Res:" << *PV.Res << "\n P:" << *PV.P << "\n";
1771 dbgs() << " M:" << *PV.M << "\n";
1772 dbgs() << " Q:" << *PV.Q << "\n";
1773 dbgs() << " Iteration count:" << PV.IterCount << "\n";
// Generate the replacement code in the preheader and rewire the result.
1776 BasicBlock::iterator At(EntryB->getTerminator());
1777 Value *PM = generate(At, PV);
1781 if (PM->getType() != PV.Res->getType())
1782 PM = IRBuilder<>(&*At).CreateIntCast(PM, PV.Res->getType(), false);
1784 PV.Res->replaceAllUsesWith(PM);
1785 PV.Res->eraseFromParent();
// Return the size, in bytes, of the value stored by SI.
1790 unsigned HexagonLoopIdiomRecognize::getStoreSizeInBytes(StoreInst *SI) {
1791 uint64_t SizeInBits = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
// NOTE(review): this assert condition looks inverted — as written it
// accepts any size that is NOT a byte multiple. The apparent intent is
// (SizeInBits & 7) == 0 && (SizeInBits >> 32) == 0. It is harmless in
// practice because isLegalStore rejects such stores first — confirm
// against the full source before changing.
1792 assert(((SizeInBits & 7) || (SizeInBits >> 32) == 0) &&
1793 "Don't overflow unsigned.");
1794 return (unsigned)SizeInBits >> 3;
// Return the constant stride (in bytes, possibly negative) of the addrec
// S; the non-constant fallback return is on an elided line.
1798 int HexagonLoopIdiomRecognize::getSCEVStride(const SCEVAddRecExpr *S) {
1799 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
1800 return SC->getAPInt().getSExtValue();
// Return true if SI is a store that can participate in the memcpy/memmove
// idiom: a strided store of a similarly-strided non-volatile load, with a
// stride that exactly matches the access size. (The early "return false"
// lines are elided from this listing.)
1805 bool HexagonLoopIdiomRecognize::isLegalStore(Loop *CurLoop, StoreInst *SI) {
1806 // Allow volatile stores if HexagonVolatileMemcpy is enabled.
1807 if (!(SI->isVolatile() && HexagonVolatileMemcpy) && !SI->isSimple())
1810 Value *StoredVal = SI->getValueOperand();
1811 Value *StorePtr = SI->getPointerOperand();
1813 // Reject stores that are so large that they overflow an unsigned.
1814 uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
// Also rejects sizes that are not a whole number of bytes.
1815 if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
1818 // See if the pointer expression is an AddRec like {base,+,1} on the current
1819 // loop, which indicates a strided store. If we have something else, it's a
1820 // random store we can't handle.
1821 auto *StoreEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
1822 if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
1825 // Check to see if the stride matches the size of the store. If so, then we
1826 // know that every byte is touched in the loop.
1827 int Stride = getSCEVStride(StoreEv);
1830 unsigned StoreSize = getStoreSizeInBytes(SI);
1831 if (StoreSize != unsigned(std::abs(Stride)))
1834 // The store must be feeding a non-volatile load.
1835 LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
1836 if (!LI || !LI->isSimple())
1839 // See if the pointer expression is an AddRec like {base,+,1} on the current
1840 // loop, which indicates a strided load. If we have something else, it's a
1841 // random load we can't handle.
1842 Value *LoadPtr = LI->getPointerOperand();
1843 auto *LoadEv = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LoadPtr));
1844 if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
1847 // The store and load must share the same stride.
1848 if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
1851 // Success. This store can be converted into a memcpy.
1856 /// mayLoopAccessLocation - Return true if the specified loop might access the
1857 /// specified pointer location, which is a loop-strided access. The 'Access'
1858 /// argument specifies what the verboten forms of access are (read or write).
/// Instructions in 'Ignored' (e.g. the store/load being transformed) are
/// not considered conflicting.
1860 mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
1861 const SCEV *BECount, unsigned StoreSize,
1863 SmallPtrSetImpl<Instruction *> &Ignored) {
1864 // Get the location that may be stored across the loop. Since the access
1865 // is strided positively through memory, we say that the modified location
1866 // starts at the pointer and has infinite size.
1867 uint64_t AccessSize = MemoryLocation::UnknownSize;
1869 // If the loop iterates a fixed number of times, we can refine the access
1870 // size to be exactly the size of the memset, which is (BECount+1)*StoreSize
1871 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
1872 AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
1874 // TODO: For this to be really effective, we have to dive into the pointer
1875 // operand in the store. Store to &A[i] of 100 will always return may alias
1876 // with store of &A[100], we need to StoreLoc to be "A" with size of 100,
1877 // which will then no-alias a store to &A[100].
1878 MemoryLocation StoreLoc(Ptr, AccessSize);
// Scan every instruction in the loop for a conflicting mod/ref.
1880 for (auto *B : L->blocks())
1882 if (Ignored.count(&I) == 0 && (AA.getModRefInfo(&I, StoreLoc) & Access))
// Collect, into Stores, all store instructions in BB that are legal
// candidates for the memcpy/memmove idiom (per isLegalStore).
1889 void HexagonLoopIdiomRecognize::collectStores(Loop *CurLoop, BasicBlock *BB,
1890 SmallVectorImpl<StoreInst*> &Stores) {
1892 for (Instruction &I : *BB)
1893 if (StoreInst *SI = dyn_cast<StoreInst>(&I))
1894 if (isLegalStore(CurLoop, SI))
1895 Stores.push_back(SI);
// Try to replace the load/store copy loop containing SI with a call to
// memcpy, memmove, or the Hexagon-specific volatile-memcpy runtime
// routine. For the memmove/volatile cases a runtime overlap check is
// emitted and the original loop is kept as the fallback path. Returns
// whether the transformation was performed (the return statements and
// several guards are on elided lines).
1899 bool HexagonLoopIdiomRecognize::processCopyingStore(Loop *CurLoop,
1900 StoreInst *SI, const SCEV *BECount) {
1901 assert((SI->isSimple() || (SI->isVolatile() && HexagonVolatileMemcpy)) &&
1902 "Expected only non-volatile stores, or Hexagon-specific memcpy"
1903 "to volatile destination.");
1905 Value *StorePtr = SI->getPointerOperand();
1906 auto *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
1907 unsigned Stride = getSCEVStride(StoreEv);
1908 unsigned StoreSize = getStoreSizeInBytes(SI);
// Only forward copies with stride == element size are handled here.
1909 if (Stride != StoreSize)
1912 // See if the pointer expression is an AddRec like {base,+,1} on the current
1913 // loop, which indicates a strided load. If we have something else, it's a
1914 // random load we can't handle.
1915 LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
1916 auto *LoadEv = cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
1918 // The trip count of the loop and the base pointer of the addrec SCEV is
1919 // guaranteed to be loop invariant, which means that it should dominate the
1920 // header. This allows us to insert code for it in the preheader.
1921 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1922 Instruction *ExpPt = Preheader->getTerminator();
1923 IRBuilder<> Builder(ExpPt);
1924 SCEVExpander Expander(*SE, *DL, "hexagon-loop-idiom");
1926 Type *IntPtrTy = Builder.getIntPtrTy(*DL, SI->getPointerAddressSpace());
1928 // Okay, we have a strided store "p[i]" of a loaded value. We can turn
1929 // this into a memcpy/memmove in the loop preheader now if we want. However,
1930 // this would be unsafe to do if there is anything else in the loop that may
1931 // read or write the memory region we're storing to. For memcpy, this
1932 // includes the load that feeds the stores. Check for an alias by generating
1933 // the base address and checking everything.
1934 Value *StoreBasePtr = Expander.expandCodeFor(StoreEv->getStart(),
1935 Builder.getInt8PtrTy(SI->getPointerAddressSpace()), ExpPt);
1936 Value *LoadBasePtr = nullptr;
1938 bool Overlap = false;
1939 bool DestVolatile = SI->isVolatile();
1940 Type *BECountTy = BECount->getType();
// Volatile destination path (elided guard): only 4-byte elements with a
// 32-bit-expressible trip count are supported by the runtime routine.
1943 // The trip count must fit in i32, since it is the type of the "num_words"
1944 // argument to hexagon_memcpy_forward_vp4cp4n2.
1945 if (StoreSize != 4 || DL->getTypeSizeInBits(BECountTy) > 32) {
// CleanupAndExit (label on an elided line): undo any expander-generated
// base-pointer code before giving up.
1947 // If we generated new code for the base pointer, clean up.
1949 if (StoreBasePtr && (LoadBasePtr != StoreBasePtr)) {
1950 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1951 StoreBasePtr = nullptr;
1954 RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
1955 LoadBasePtr = nullptr;
// Alias scan 1: anything in the loop (other than Ignore1) that may read
// or write the destination region forces the memmove path.
1961 SmallPtrSet<Instruction*, 2> Ignore1;
1963 if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
1964 StoreSize, *AA, Ignore1)) {
1965 // Check if the load is the offending instruction.
// (The load is added to Ignore1 on an elided line before re-checking.)
1967 if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
1968 StoreSize, *AA, Ignore1)) {
1969 // Still bad. Nothing we can do.
1970 goto CleanupAndExit;
1972 // It worked with the load ignored.
1977 if (DisableMemcpyIdiom || !HasMemcpy)
1978 goto CleanupAndExit;
// Memmove path (overlapping case), guarded by elided else:
1980 // Don't generate memmove if this function will be inlined. This is
1981 // because the caller will undergo this transformation after inlining.
1982 Function *Func = CurLoop->getHeader()->getParent();
1983 if (Func->hasFnAttribute(Attribute::AlwaysInline))
1984 goto CleanupAndExit;
1986 // In case of a memmove, the call to memmove will be executed instead
1987 // of the loop, so we need to make sure that there is nothing else in
1988 // the loop than the load, store and instructions that these two depend
1990 SmallVector<Instruction*,2> Insts;
1991 Insts.push_back(SI);
1992 Insts.push_back(LI);
1993 if (!coverLoop(CurLoop, Insts))
1994 goto CleanupAndExit;
1996 if (DisableMemmoveIdiom || !HasMemmove)
1997 goto CleanupAndExit;
1998 bool IsNested = CurLoop->getParentLoop() != 0;
1999 if (IsNested && OnlyNonNestedMemmove)
2000 goto CleanupAndExit;
2003 // For a memcpy, we have to make sure that the input array is not being
2004 // mutated by the loop.
2005 LoadBasePtr = Expander.expandCodeFor(LoadEv->getStart(),
2006 Builder.getInt8PtrTy(LI->getPointerAddressSpace()), ExpPt);
// Alias scan 2: the source region must not be written by the loop.
2008 SmallPtrSet<Instruction*, 2> Ignore2;
2010 if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
2012 goto CleanupAndExit;
2014 // Check the stride.
2015 bool StridePos = getSCEVStride(LoadEv) >= 0;
2017 // Currently, the volatile memcpy only emulates traversing memory forward.
2018 if (!StridePos && DestVolatile)
2019 goto CleanupAndExit;
// A runtime check (and guarded call) is needed when the regions may
// overlap or the destination is volatile.
2021 bool RuntimeCheck = (Overlap || DestVolatile);
2025 // The runtime check needs a single exit block.
2026 SmallVector<BasicBlock*, 8> ExitBlocks;
2027 CurLoop->getUniqueExitBlocks(ExitBlocks);
2028 if (ExitBlocks.size() != 1)
2029 goto CleanupAndExit;
2030 ExitB = ExitBlocks[0];
2033 // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
2034 // pointer size if it isn't already.
2035 LLVMContext &Ctx = SI->getContext();
2036 BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
2037 unsigned Alignment = std::min(SI->getAlignment(), LI->getAlignment());
2038 DebugLoc DLoc = SI->getDebugLoc();
2040 const SCEV *NumBytesS =
2041 SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);
2043 NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
2045 Value *NumBytes = Expander.expandCodeFor(NumBytesS, IntPtrTy, ExpPt);
2046 if (Instruction *In = dyn_cast<Instruction>(NumBytes))
2047 if (Value *Simp = SimplifyInstruction(In, {*DL, TLI, DT}))
// Size thresholds: skip copies known (or required) to be too small for
// the call overhead to pay off.
2053 unsigned Threshold = RuntimeMemSizeThreshold;
2054 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes)) {
2055 uint64_t C = CI->getZExtValue();
2056 if (Threshold != 0 && C < Threshold)
2057 goto CleanupAndExit;
2058 if (C < CompileTimeMemSizeThreshold)
2059 goto CleanupAndExit;
2062 BasicBlock *Header = CurLoop->getHeader();
2063 Function *Func = Header->getParent();
2064 Loop *ParentL = LF->getLoopFor(Preheader);
2065 StringRef HeaderName = Header->getName();
2067 // Create a new (empty) preheader, and update the PHI nodes in the
2068 // header to use the new preheader.
2069 BasicBlock *NewPreheader = BasicBlock::Create(Ctx, HeaderName+".rtli.ph",
2072 ParentL->addBasicBlockToLoop(NewPreheader, *LF);
2073 IRBuilder<>(NewPreheader).CreateBr(Header);
2074 for (auto &In : *Header) {
2075 PHINode *PN = dyn_cast<PHINode>(&In);
2078 int bx = PN->getBasicBlockIndex(Preheader);
2080 PN->setIncomingBlock(bx, NewPreheader);
// Keep the dominator tree consistent with the new CFG.
2082 DT->addNewBlock(NewPreheader, Preheader);
2083 DT->changeImmediateDominator(Header, NewPreheader);
2085 // Check for safe conditions to execute memmove.
2086 // If stride is positive, copying things from higher to lower addresses
2087 // is equivalent to memmove. For negative stride, it's the other way
2088 // around. Copying forward in memory with positive stride may not be
2089 // same as memmove since we may be copying values that we just stored
2090 // in some previous iteration.
2091 Value *LA = Builder.CreatePtrToInt(LoadBasePtr, IntPtrTy);
2092 Value *SA = Builder.CreatePtrToInt(StoreBasePtr, IntPtrTy);
2093 Value *LowA = StridePos ? SA : LA;
2094 Value *HighA = StridePos ? LA : SA;
2095 Value *CmpA = Builder.CreateICmpULT(LowA, HighA);
2098 // Check for distance between pointers.
2099 Value *Dist = Builder.CreateSub(HighA, LowA);
2100 Value *CmpD = Builder.CreateICmpSLT(NumBytes, Dist);
2101 Value *CmpEither = Builder.CreateOr(Cond, CmpD);
// Optionally also require NumBytes to exceed the runtime threshold.
2104 if (Threshold != 0) {
2105 Type *Ty = NumBytes->getType();
2106 Value *Thr = ConstantInt::get(Ty, Threshold);
2107 Value *CmpB = Builder.CreateICmpULT(Thr, NumBytes);
2108 Value *CmpBoth = Builder.CreateAnd(Cond, CmpB);
// Split control flow: Cond ? memmove block : original loop.
2111 BasicBlock *MemmoveB = BasicBlock::Create(Ctx, Header->getName()+".rtli",
2112 Func, NewPreheader);
2114 ParentL->addBasicBlockToLoop(MemmoveB, *LF);
2115 Instruction *OldT = Preheader->getTerminator();
2116 Builder.CreateCondBr(Cond, MemmoveB, NewPreheader);
2117 OldT->eraseFromParent();
2118 Preheader->setName(Preheader->getName()+".old");
2119 DT->addNewBlock(MemmoveB, Preheader);
2120 // Find the new immediate dominator of the exit block.
2121 BasicBlock *ExitD = Preheader;
2122 for (auto PI = pred_begin(ExitB), PE = pred_end(ExitB); PI != PE; ++PI) {
2123 BasicBlock *PB = *PI;
2124 ExitD = DT->findNearestCommonDominator(ExitD, PB);
2128 // If the prior immediate dominator of ExitB was dominated by the
2129 // old preheader, then the old preheader becomes the new immediate
2130 // dominator. Otherwise don't change anything (because the newly
2131 // added blocks are dominated by the old preheader).
2132 if (ExitD && DT->dominates(Preheader, ExitD)) {
2133 DomTreeNode *BN = DT->getNode(ExitB);
2134 DomTreeNode *DN = DT->getNode(ExitD);
2138 // Add a call to memmove to the conditional block.
2139 IRBuilder<> CondBuilder(MemmoveB);
2140 CondBuilder.CreateBr(ExitB);
2141 CondBuilder.SetInsertPoint(MemmoveB->getTerminator());
// Volatile destination: call the Hexagon runtime routine instead of
// an ordinary memmove. Declare it in the module if needed.
2144 Type *Int32Ty = Type::getInt32Ty(Ctx);
2145 Type *Int32PtrTy = Type::getInt32PtrTy(Ctx);
2146 Type *VoidTy = Type::getVoidTy(Ctx);
2147 Module *M = Func->getParent();
2148 Constant *CF = M->getOrInsertFunction(HexagonVolatileMemcpyName, VoidTy,
2149 Int32PtrTy, Int32PtrTy, Int32Ty);
2150 Function *Fn = cast<Function>(CF);
2151 Fn->setLinkage(Function::ExternalLinkage);
// The routine takes a word count, not a byte count.
2153 const SCEV *OneS = SE->getConstant(Int32Ty, 1);
2154 const SCEV *BECount32 = SE->getTruncateOrZeroExtend(BECount, Int32Ty);
2155 const SCEV *NumWordsS = SE->getAddExpr(BECount32, OneS, SCEV::FlagNUW);
2156 Value *NumWords = Expander.expandCodeFor(NumWordsS, Int32Ty,
2157 MemmoveB->getTerminator());
2158 if (Instruction *In = dyn_cast<Instruction>(NumWords))
2159 if (Value *Simp = SimplifyInstruction(In, {*DL, TLI, DT}))
2162 Value *Op0 = (StoreBasePtr->getType() == Int32PtrTy)
2164 : CondBuilder.CreateBitCast(StoreBasePtr, Int32PtrTy);
2165 Value *Op1 = (LoadBasePtr->getType() == Int32PtrTy)
2167 : CondBuilder.CreateBitCast(LoadBasePtr, Int32PtrTy);
2168 NewCall = CondBuilder.CreateCall(Fn, {Op0, Op1, NumWords});
// Non-volatile overlapping case: plain memmove in the guarded block.
2170 NewCall = CondBuilder.CreateMemMove(StoreBasePtr, LoadBasePtr,
2171 NumBytes, Alignment);
// No-overlap case: unconditional memcpy in the preheader.
2174 NewCall = Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr,
2175 NumBytes, Alignment);
2176 // Okay, the memcpy has been formed. Zap the original store and
2177 // anything that feeds into it.
2178 RecursivelyDeleteTriviallyDeadInstructions(SI, TLI);
2181 NewCall->setDebugLoc(DLoc);
2183 DEBUG(dbgs() << " Formed " << (Overlap ? "memmove: " : "memcpy: ")
2185 << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
2186 << " from store ptr=" << *StoreEv << " at: " << *SI << "\n");
2192 // \brief Check if the instructions in Insts, together with their dependencies
2193 // cover the loop in the sense that the loop could be safely eliminated once
2194 // the instructions in Insts are removed.
2195 bool HexagonLoopIdiomRecognize::coverLoop(Loop *L,
2196 SmallVectorImpl<Instruction*> &Insts) const {
2197 SmallSet<BasicBlock*,8> LoopBlocks;
2198 for (auto *B : L->blocks())
2199 LoopBlocks.insert(B);
// SetVector: worklist with stable order and O(1) membership test.
2201 SetVector<Instruction*> Worklist(Insts.begin(), Insts.end());
2203 // Collect all instructions from the loop that the instructions in Insts
2204 // depend on (plus their dependencies, etc.). These instructions will
2205 // constitute the expression trees that feed those in Insts, but the trees
2206 // will be limited only to instructions contained in the loop.
// Note: iterate by index because Worklist grows during the loop.
2207 for (unsigned i = 0; i < Worklist.size(); ++i) {
2208 Instruction *In = Worklist[i];
2209 for (auto I = In->op_begin(), E = In->op_end(); I != E; ++I) {
2210 Instruction *OpI = dyn_cast<Instruction>(I);
2213 BasicBlock *PB = OpI->getParent();
2214 if (!LoopBlocks.count(PB))
2216 Worklist.insert(OpI);
2220 // Scan all instructions in the loop, if any of them have a user outside
2221 // of the loop, or outside of the expressions collected above, then either
2222 // the loop has a side-effect visible outside of it, or there are
2223 // instructions in it that are not involved in the original set Insts.
2224 for (auto *B : L->blocks()) {
2225 for (auto &In : *B) {
2226 if (isa<BranchInst>(In) || isa<DbgInfoIntrinsic>(In))
2228 if (!Worklist.count(&In) && In.mayHaveSideEffects())
2230 for (const auto &K : In.users()) {
2231 Instruction *UseI = dyn_cast<Instruction>(K);
2234 BasicBlock *UseB = UseI->getParent();
// A use in a block not belonging to L escapes the loop.
2235 if (LF->getLoopFor(UseB) != L)
2244 /// runOnLoopBlock - Process the specified block, which lives in a counted loop
2245 /// with the specified backedge count. This block is known to be in the current
2246 /// loop and not in any subloops.
2247 bool HexagonLoopIdiomRecognize::runOnLoopBlock(Loop *CurLoop, BasicBlock *BB,
2248 const SCEV *BECount, SmallVectorImpl<BasicBlock*> &ExitBlocks) {
2249 // We can only promote stores in this block if they are unconditionally
2250 // executed in the loop. For a block to be unconditionally executed, it has
2251 // to dominate all the exit blocks of the loop. Verify this now.
2252 auto DominatedByBB = [this,BB] (BasicBlock *EB) -> bool {
2253 return DT->dominates(BB, EB);
2255 if (!std::all_of(ExitBlocks.begin(), ExitBlocks.end(), DominatedByBB))
2258 bool MadeChange = false;
2259 // Look for store instructions, which may be optimized to memset/memcpy.
2260 SmallVector<StoreInst*,8> Stores;
2261 collectStores(CurLoop, BB, Stores);
2263 // Optimize the store into a memcpy, if it feeds an similarly strided load.
2264 for (auto &SI : Stores)
2265 MadeChange |= processCopyingStore(CurLoop, SI, BECount);
// Run the idiom recognizers on a loop whose backedge-taken count is
// known: first the polynomial-multiply recognizer, then the per-block
// memcpy/memmove store promotion.
2271 bool HexagonLoopIdiomRecognize::runOnCountableLoop(Loop *L) {
2272 PolynomialMultiplyRecognize PMR(L, *DL, *DT, *TLI, *SE);
// If the loop was turned into a pmpy, there is nothing more to do here.
2273 if (PMR.recognize())
2276 if (!HasMemcpy && !HasMemmove)
2279 const SCEV *BECount = SE->getBackedgeTakenCount(L);
2280 assert(!isa<SCEVCouldNotCompute>(BECount) &&
2281 "runOnCountableLoop() called on a loop without a predictable"
2282 "backedge-taken count");
2284 SmallVector<BasicBlock *, 8> ExitBlocks;
2285 L->getUniqueExitBlocks(ExitBlocks);
2287 bool Changed = false;
2289 // Scan all the blocks in the loop that are not in subloops.
2290 for (auto *BB : L->getBlocks()) {
2291 // Ignore blocks in subloops.
2292 if (LF->getLoopFor(BB) != L)
2294 Changed |= runOnLoopBlock(L, BB, BECount, ExitBlocks);
// LoopPass entry point: gate on the Hexagon target, fetch the required
// analyses, and dispatch to runOnCountableLoop when the trip count is
// computable.
2301 bool HexagonLoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
2302 const Module &M = *L->getHeader()->getParent()->getParent();
// This pass only applies to the Hexagon target.
2303 if (Triple(M.getTargetTriple()).getArch() != Triple::hexagon)
2309 // If the loop could not be converted to canonical form, it must have an
2310 // indirectbr in it, just give up.
2311 if (!L->getLoopPreheader())
2314 // Disable loop idiom recognition if the function's name is a common idiom.
2315 StringRef Name = L->getHeader()->getParent()->getName();
// Avoid creating a self-recursive memcpy/memmove implementation.
2316 if (Name == "memset" || Name == "memcpy" || Name == "memmove")
2319 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2320 DL = &L->getHeader()->getModule()->getDataLayout();
2321 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2322 LF = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2323 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
2324 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
// Library calls can only be emitted if the target library provides them.
2326 HasMemcpy = TLI->has(LibFunc_memcpy);
2327 HasMemmove = TLI->has(LibFunc_memmove);
2329 if (SE->hasLoopInvariantBackedgeTakenCount(L))
2330 return runOnCountableLoop(L);
2335 Pass *llvm::createHexagonLoopIdiomPass() {
2336 return new HexagonLoopIdiomRecognize();