//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
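//
// Illustrative example (not part of the original comments): the simplest case
// handled here is a loop such as
//
//   for (i = 0; i != n; ++i)
//     p[i] = 0;
//
// which is rewritten into a single call to memset(p, 0, n); the now-empty
// loop can then be deleted by later passes.
//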
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
// Future integer operation idioms to recognize:
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

static cl::opt<bool> UseLIRCodeSizeHeurs(
    "use-lir-code-size-heurs",
    cl::desc("Use loop idiom recognition code size heuristics when compiling"
             " with -Os/-Oz"),
    cl::init(true), cl::Hidden);
namespace {

class LoopIdiomRecognize {
  Loop *CurLoop = nullptr;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;
  bool ApplyCodeSizeHeuristics;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI,
                              const DataLayout *DL)
      : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL) {}

  bool runOnLoop(Loop *L);

private:
  using StoreList = SmallVector<StoreInst *, 8>;
  using StoreListMap = MapVector<Value *, StoreList>;

  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// Return code for isLegalStore()
  enum LegalStoreKind {
    None = 0,
    Memset,
    MemsetPattern,
    Memcpy,
    UnorderedAtomicMemcpy,
    DontUse // Dummy retval never to be used. Allows catching errors in retval
            // processing.
  };

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  LegalStoreKind isLegalStore(StoreInst *SI);
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         bool ForMemset);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               unsigned StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);
  bool recognizeAndInsertCTLZ();
  void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
                                PHINode *CntPhi, Value *Var, const DebugLoc DL,
                                bool ZeroCheck, bool IsCntPhiUsedOutsideLoop);

  /// @}
};
class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;

  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopIdiomRecognizeLegacyPass::ID = 0;
PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  const auto *DL = &L.getHeader()->getModule()->getDataLayout();

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}
INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }
static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
//          Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//
bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}
bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
               << CurLoop->getHeader()->getName() << "\n");

  bool MadeChange = false;

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  LoopSafetyInfo SafetyInfo;
  computeLoopSafetyInfo(&SafetyInfo, CurLoop);
  if (SafetyInfo.MayThrow)
    return MadeChange;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}
static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}
/// getMemSetPatternValue - If a strided store of the specified value is safe
/// to turn into a memset_pattern16, return a ConstantArray of 16 bytes that
/// should be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}
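
// Illustrative example (not part of the original comments): for a strided
// store of the i32 constant 0x01020304, getMemSetPatternValue returns a
// 16-byte array holding that constant replicated four times (little-endian
// targets only), which becomes the pattern argument of a memset_pattern16
// call.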
383 LoopIdiomRecognize::LegalStoreKind
384 LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
385 // Don't touch volatile stores.
386 if (SI->isVolatile())
387 return LegalStoreKind::None;
388 // We only want simple or unordered-atomic stores.
389 if (!SI->isUnordered())
390 return LegalStoreKind::None;
  // Don't convert stores of non-integral pointer types to memsets (which
  // stores integers).
  if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
    return LegalStoreKind::None;
397 // Avoid merging nontemporal stores.
398 if (SI->getMetadata(LLVMContext::MD_nontemporal))
399 return LegalStoreKind::None;
401 Value *StoredVal = SI->getValueOperand();
402 Value *StorePtr = SI->getPointerOperand();
404 // Reject stores that are so large that they overflow an unsigned.
405 uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
406 if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
407 return LegalStoreKind::None;
409 // See if the pointer expression is an AddRec like {base,+,1} on the current
410 // loop, which indicates a strided store. If we have something else, it's a
411 // random store we can't handle.
412 const SCEVAddRecExpr *StoreEv =
413 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
414 if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
415 return LegalStoreKind::None;
417 // Check to see if we have a constant stride.
418 if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
419 return LegalStoreKind::None;
421 // See if the store can be turned into a memset.
423 // If the stored value is a byte-wise value (like i32 -1), then it may be
424 // turned into a memset of i8 -1, assuming that all the consecutive bytes
425 // are stored. A store of i32 0x01020304 can never be turned into a memset,
426 // but it can be turned into memset_pattern if the target supports it.
427 Value *SplatValue = isBytewiseValue(StoredVal);
428 Constant *PatternValue = nullptr;
  // Note: memset and memset_pattern on unordered-atomic is not yet supported
  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
433 // If we're allowed to form a memset, and the stored value would be
434 // acceptable for memset, use it.
435 if (!UnorderedAtomic && HasMemset && SplatValue &&
436 // Verify that the stored value is loop invariant. If not, we can't
437 // promote the memset.
438 CurLoop->isLoopInvariant(SplatValue)) {
439 // It looks like we can use SplatValue.
440 return LegalStoreKind::Memset;
441 } else if (!UnorderedAtomic && HasMemsetPattern &&
442 // Don't create memset_pattern16s with address spaces.
443 StorePtr->getType()->getPointerAddressSpace() == 0 &&
444 (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
445 // It looks like we can use PatternValue!
    return LegalStoreKind::MemsetPattern;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
451 // Check to see if the stride matches the size of the store. If so, then we
452 // know that every byte is touched in the loop.
453 APInt Stride = getStoreStride(StoreEv);
454 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
455 if (StoreSize != Stride && StoreSize != -Stride)
456 return LegalStoreKind::None;
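  // Illustrative example (not part of the original comments): an i32 store
  // (StoreSize == 4) whose address recurrence is {p,+,4} touches every byte
  // of the region and remains a memset/memcpy candidate; with a stride of 8
  // it would leave gaps, so it is rejected by the check above.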
458 // The store must be feeding a non-volatile load.
459 LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
461 // Only allow non-volatile loads
462 if (!LI || LI->isVolatile())
463 return LegalStoreKind::None;
464 // Only allow simple or unordered-atomic loads
465 if (!LI->isUnordered())
466 return LegalStoreKind::None;
468 // See if the pointer expression is an AddRec like {base,+,1} on the current
469 // loop, which indicates a strided load. If we have something else, it's a
470 // random load we can't handle.
471 const SCEVAddRecExpr *LoadEv =
472 dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
473 if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
474 return LegalStoreKind::None;
476 // The store and load must share the same stride.
477 if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
478 return LegalStoreKind::None;
480 // Success. This store can be converted into a memcpy.
481 UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
                           : LegalStoreKind::Memcpy;
  }
  // This store can't be transformed into a memset/memcpy.
  return LegalStoreKind::None;
}
void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Make sure this is a strided store with a constant stride.
    switch (isLegalStore(SI)) {
    case LegalStoreKind::None:
      // Nothing to do.
      break;
    case LegalStoreKind::Memset: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemset[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::MemsetPattern: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::Memcpy:
    case LegalStoreKind::UnorderedAtomicMemcpy:
      StoreRefsForMemcpy.push_back(SI);
      break;
    default:
      assert(false && "unhandled return value");
      break;
    }
  }
}
/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, true);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, false);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakTrackingVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
    }
  }

  return MadeChange;
}
574 /// processLoopStores - See if this store(s) can be promoted to a memset.
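///
/// Illustrative example (not part of the original comments): a hand-unrolled
/// loop such as
///   for (i = 0; i != n; ++i) { p[2*i] = 0; p[2*i+1] = 0; }
/// has two stores with a common base and stride 8; chaining the consecutive
/// pair lets the whole region be covered by a single memset of 8*n bytes.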
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount,
                                           bool ForMemset) {
  // Try to find consecutive stores that can be transformed into memsets.
579 SetVector<StoreInst *> Heads, Tails;
580 SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
582 // Do a quadratic search on all of the given stores and find
583 // all of the pairs of stores that follow each other.
584 SmallVector<unsigned, 16> IndexQueue;
585 for (unsigned i = 0, e = SL.size(); i < e; ++i) {
586 assert(SL[i]->isSimple() && "Expected only non-volatile stores.");
588 Value *FirstStoredVal = SL[i]->getValueOperand();
589 Value *FirstStorePtr = SL[i]->getPointerOperand();
590 const SCEVAddRecExpr *FirstStoreEv =
591 cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
592 APInt FirstStride = getStoreStride(FirstStoreEv);
593 unsigned FirstStoreSize = DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());
595 // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }
601 Value *FirstSplatValue = nullptr;
602 Constant *FirstPatternValue = nullptr;
    if (ForMemset)
      FirstSplatValue = isBytewiseValue(FirstStoredVal);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);
609 assert((FirstSplatValue || FirstPatternValue) &&
610 "Expected either splat value or pattern value.");
    // If a store has multiple consecutive store candidates, search Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because usually pairing with immediate succeeding or preceding
    // candidates creates the best chance to find a memset opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);
623 for (auto &k : IndexQueue) {
624 assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
625 Value *SecondStorePtr = SL[k]->getPointerOperand();
626 const SCEVAddRecExpr *SecondStoreEv =
627 cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
628 APInt SecondStride = getStoreStride(SecondStoreEv);
630 if (FirstStride != SecondStride)
633 Value *SecondStoredVal = SL[k]->getValueOperand();
634 Value *SecondSplatValue = nullptr;
635 Constant *SecondPatternValue = nullptr;
      if (ForMemset)
        SecondSplatValue = isBytewiseValue(SecondStoredVal);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);
642 assert((SecondSplatValue || SecondPatternValue) &&
643 "Expected either splat value or pattern value.");
      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (ForMemset) {
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }
661 // We may run into multiple chains that merge into a single chain. We mark the
662 // stores that we transformed so that we don't visit the same store twice.
663 SmallPtrSet<Value *, 16> TransformedStores;
664 bool Changed = false;
666 // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instruction that starts a chain. Now follow the chain
    // and try to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
678 unsigned StoreSize = 0;
    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }
691 Value *StoredVal = HeadStore->getValueOperand();
692 Value *StorePtr = HeadStore->getPointerOperand();
693 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
694 APInt Stride = getStoreStride(StoreEv);
    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}
714 /// processLoopMemSet - See if this memset can be promoted to a large memset.
715 bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
716 const SCEV *BECount) {
717 // We can only handle non-volatile memsets with a constant size.
718 if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
721 // If we're not allowed to hack on memset, we fail.
725 Value *Pointer = MSI->getDest();
727 // See if the pointer expression is an AddRec like {base,+,1} on the current
728 // loop, which indicates a strided store. If we have something else, it's a
729 // random store we can't handle.
730 const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
731 if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
734 // Reject memsets that are so large that they overflow an unsigned.
735 uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
736 if ((SizeInBytes >> 32) != 0)
739 // Check to see if the stride matches the size of the memset. If so, then we
740 // know that every byte is touched in the loop.
741 const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
745 APInt Stride = ConstStride->getAPInt();
746 if (SizeInBytes != Stride && SizeInBytes != -Stride)
749 // Verify that the memset value is loop invariant. If not, we can't promote
751 Value *SplatValue = MSI->getValue();
752 if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
757 bool NegStride = SizeInBytes == -Stride;
758 return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
759 MSI->getAlignment(), SplatValue, MSI, MSIs, Ev,
760 BECount, NegStride, /*IsLoopMemset=*/true);
763 /// mayLoopAccessLocation - Return true if the specified loop might access the
764 /// specified pointer location, which is a loop-strided access. The 'Access'
765 /// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
771 // Get the location that may be stored across the loop. Since the access is
772 // strided positively through memory, we say that the modified location starts
773 // at the pointer and has infinite size.
774 uint64_t AccessSize = MemoryLocation::UnknownSize;
776 // If the loop iterates a fixed number of times, we can refine the access size
777 // to be exactly the size of the memset, which is (BECount+1)*StoreSize
778 if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
779 AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
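  // Illustrative example (not part of the original comments): with a constant
  // backedge-taken count of 99 and StoreSize == 4, the checked location is
  // the 400 bytes starting at Ptr; otherwise the conservative UnknownSize is
  // used.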
781 // TODO: For this to be really effective, we have to dive into the pointer
782 // operand in the store. Store to &A[i] of 100 will always return may alias
783 // with store of &A[100], we need to StoreLoc to be "A" with size of 100,
784 // which will then no-alias a store to &A[100].
785 MemoryLocation StoreLoc(Ptr, AccessSize);
  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          isModOrRefSet(
              intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
        return true;

  return false;
}
798 // If we have a negative stride, Start refers to the end of the memory location
799 // we're trying to memset. Therefore, we need to recompute the base pointer,
800 // which is just Start - BECount*Size.
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}
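
// Illustrative example (not part of the original comments): for a store whose
// address recurrence is {End,+,-4} with BECount == n-1, the lowest address
// written is End - 4*(n-1), which is the base pointer this helper computes.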
811 /// Compute the number of bytes as a SCEV from the backedge taken count.
813 /// This also maps the SCEV into the provided type and tries to handle the
814 /// computation in a way that will fold cleanly.
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
                               unsigned StoreSize, Loop *CurLoop,
                               const DataLayout *DL, ScalarEvolution *SE) {
  const SCEV *NumBytesS;
  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  //
  // If we're going to need to zero extend the BE count, check if we can add
  // one to it prior to zero extending without overflow. Provided this is safe,
  // it allows better simplification of the +1.
  if (DL->getTypeSizeInBits(BECount->getType()) <
          DL->getTypeSizeInBits(IntPtr) &&
      SE->isLoopEntryGuardedByCond(
          CurLoop, ICmpInst::ICMP_NE, BECount,
          SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
    NumBytesS = SE->getZeroExtendExpr(
        SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
        IntPtr);
  } else {
    NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
                               SE->getOne(IntPtr), SCEV::FlagNUW);
  }

  // And scale it based on the store size.
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }
  return NumBytesS;
}
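
// Illustrative example (not part of the original comments): for BECount == 99
// and StoreSize == 4, the loop performs 100 stores of 4 bytes each, so the
// expression built here folds to the constant 400.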
846 /// processLoopStridedStore - We see a strided store of some value. If we can
847 /// transform this into a memset or memset_pattern in the loop preheader, do so.
848 bool LoopIdiomRecognize::processLoopStridedStore(
849 Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
850 Value *StoredVal, Instruction *TheStore,
851 SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
852 const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
853 Value *SplatValue = isBytewiseValue(StoredVal);
854 Constant *PatternValue = nullptr;
857 PatternValue = getMemSetPatternValue(StoredVal, DL);
859 assert((SplatValue || PatternValue) &&
860 "Expected either splat value or pattern value.");
  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for it in the preheader.
865 unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
866 BasicBlock *Preheader = CurLoop->getLoopPreheader();
867 IRBuilder<> Builder(Preheader->getTerminator());
868 SCEVExpander Expander(*SE, *DL, "loop-idiom");
870 Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
871 Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
878 // TODO: ideally we should still be able to generate memset if SCEV expander
879 // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(Start, *SE))
    return false;
883 // Okay, we have a strided store "p[i]" of a splattable value. We can turn
884 // this into a memset in the loop preheader now if we want. However, this
885 // would be unsafe to do if there is anything else in the loop that may read
886 // or write to the aliased location. Check for any overlap by generating the
887 // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
890 if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
891 StoreSize, *AA, Stores)) {
893 // If we generated new code for the base pointer, clean up.
894 RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return false;
901 // Okay, everything looks good, insert the memset.
903 const SCEV *NumBytesS =
904 getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
906 // TODO: ideally we should still be able to generate memset if SCEV expander
907 // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(NumBytesS, *SE))
    return false;
  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
  CallInst *NewCall;
  if (SplatValue) {
    NewCall =
        Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
  } else {
    // Everything is emitted in default address space
    Type *Int8PtrTy = DestInt8PtrTy;
    Module *M = TheStore->getModule();
    Value *MSP =
        M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
                               Int8PtrTy, Int8PtrTy, IntPtr);
926 inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);
928 // Otherwise we should form a memset_pattern16. PatternValue is known to be
    // a constant array of 16 bytes. Plop the value into a mergeable global.
930 GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
931 GlobalValue::PrivateLinkage,
932 PatternValue, ".memset_pattern");
933 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
934 GV->setAlignment(16);
935 Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }
939 DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
940 << " from store to: " << *Ev << " at: " << *TheStore << "\n");
941 NewCall->setDebugLoc(TheStore->getDebugLoc());
  // Okay, the memset has been formed. Zap the original store and anything that
  // feeds into it.
  for (auto *I : Stores)
    deleteDeadInstruction(I);
  ++NumMemSet;
  return true;
}
951 /// If the stored value is a strided load in the same loop with the same stride
952 /// this may be transformable into a memcpy. This kicks in for stuff like
953 /// for (i) A[i] = B[i];
954 bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
955 const SCEV *BECount) {
956 assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");
958 Value *StorePtr = SI->getPointerOperand();
959 const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
960 APInt Stride = getStoreStride(StoreEv);
961 unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
962 bool NegStride = StoreSize == -Stride;
964 // The store must be feeding a non-volatile load.
965 LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
966 assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");
968 // See if the pointer expression is an AddRec like {base,+,1} on the current
969 // loop, which indicates a strided load. If we have something else, it's a
970 // random load we can't handle.
971 const SCEVAddRecExpr *LoadEv =
972 cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate the
  // header. This allows us to insert code for it in the preheader.
977 BasicBlock *Preheader = CurLoop->getLoopPreheader();
978 IRBuilder<> Builder(Preheader->getTerminator());
979 SCEVExpander Expander(*SE, *DL, "loop-idiom");
981 const SCEV *StrStart = StoreEv->getStart();
982 unsigned StrAS = SI->getPointerAddressSpace();
983 Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
989 // Okay, we have a strided store "p[i]" of a loaded value. We can turn
990 // this into a memcpy in the loop preheader now if we want. However, this
991 // would be unsafe to do if there is anything else in the loop that may read
992 // or write the memory region we're storing to. This includes the load that
993 // feeds the stores. Check for an alias by generating the base address and
994 // checking everything.
995 Value *StoreBasePtr = Expander.expandCodeFor(
996 StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
1000 if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
1001 StoreSize, *AA, Stores)) {
1003 // If we generated new code for the base pointer, clean up.
1004 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
1008 const SCEV *LdStart = LoadEv->getStart();
1009 unsigned LdAS = LI->getPointerAddressSpace();
  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
1015 // For a memcpy, we have to make sure that the input array is not being
1016 // mutated by the loop.
1017 Value *LoadBasePtr = Expander.expandCodeFor(
1018 LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
1020 if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
1021 StoreSize, *AA, Stores)) {
1023 // If we generated new code for the base pointer, clean up.
1024 RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
1025 RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
  if (avoidLIRForMultiBlockLoop())
    return false;
1032 // Okay, everything is safe, we can transform this!
1034 const SCEV *NumBytesS =
1035 getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
1040 unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
1041 CallInst *NewCall = nullptr;
1042 // Check whether to generate an unordered atomic memcpy:
  //  If the load or store is atomic, then it must necessarily be unordered
1044 // by previous checks.
1045 if (!SI->isAtomic() && !LI->isAtomic())
    NewCall = Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes, Align);
  else {
    // We cannot allow unaligned ops for unordered load/store, so reject
    // anything where the alignment isn't at least the element size.
    if (Align < StoreSize)
      return false;
1053 // If the element.atomic memcpy is not lowered into explicit
1054 // loads/stores later, then it will be lowered into an element-size
1055 // specific lib call. If the lib call doesn't exist for our store size, then
1056 // we shouldn't generate the memcpy.
    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
      return false;
1061 // Note that unordered atomic loads/stores are *required* by the spec to
1062 // have an alignment but non-atomic loads/stores may not.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, SI->getAlignment(), LoadBasePtr, LI->getAlignment(),
        NumBytes, StoreSize);
  }
1067 NewCall->setDebugLoc(SI->getDebugLoc());
1069 DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
1070 << " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
1071 << " from store ptr=" << *StoreEv << " at: " << *SI << "\n");
  // Okay, the memcpy has been formed. Zap the original store and anything that
  // feeds into it.
  deleteDeadInstruction(SI);
  ++NumMemCpy;
  return true;
}
// When compiling for code size we avoid idiom recognition for a multi-block
// loop unless it is a loop_memset idiom or a memset/memcpy idiom in a nested
// loop.
1083 bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
1084 bool IsLoopMemset) {
1085 if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
1086 if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
      DEBUG(dbgs() << "  " << CurLoop->getHeader()->getParent()->getName()
                   << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
                   << " avoided: multi-block top-level loop\n");
      return true;
    }
  }
  return false;
}
bool LoopIdiomRecognize::runOnNoncountableLoop() {
  return recognizePopcount() || recognizeAndInsertCTLZ();
}
1101 /// Check if the given conditional branch is based on the comparison between
1102 /// a variable and zero, and if the variable is non-zero, the control yields to
1103 /// the loop entry. If the branch matches the behavior, the variable involved
1104 /// in the comparison is returned. This function will be called to see if the
1105 /// precondition and postcondition of the loop are in desirable form.
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
  if (!BI || !BI->isConditional())
    return nullptr;

  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return nullptr;

  ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!CmpZero || !CmpZero->isZero())
    return nullptr;

  ICmpInst::Predicate Pred = Cond->getPredicate();
  if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
      (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
    return Cond->getOperand(0);

  return nullptr;
}
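
// Illustrative example (not part of the original comments): for a terminator
// of the form
//   %cmp = icmp ne i32 %x, 0
//   br i1 %cmp, label %loop, label %exit
// matchCondition(Br, %loop) returns %x; any other shape returns nullptr.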
1126 // Check if the recurrence variable `VarX` is in the right form to create
1127 // the idiom. Returns the value coerced to a PHINode if so.
static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
                                 BasicBlock *LoopEntry) {
  auto *PhiX = dyn_cast<PHINode>(VarX);
  if (PhiX && PhiX->getParent() == LoopEntry &&
      (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
    return PhiX;
  return nullptr;
}
1137 /// Return true iff the idiom is detected in the loop.
1140 /// 1) \p CntInst is set to the instruction counting the population bit.
1141 /// 2) \p CntPhi is set to the corresponding phi node.
1142 /// 3) \p Var is set to the value whose population bits are being counted.
/// The core idiom we are trying to detect is:
///
///    if (x0 == 0)
///      goto loop-exit          // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x1 = phi (x0, x2);
///       cnt1 = phi(cnt0, cnt2);
///
///       cnt2 = cnt1 + 1;
///        ...
///       x2 = x1 & (x1 - 1);
///        ...
///    } while(x != 0);
///
/// loop-exit:
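///
/// Illustrative example (not part of the original comments): in C this is
///   while (x) { cnt++; x &= x - 1; }
/// which clears one set bit per iteration, so the loop executes exactly
/// popcount(x) times and cnt can be computed with a single ctpop intrinsic.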
1161 static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
1162 Instruction *&CntInst, PHINode *&CntPhi,
  // step 1: Check to see if the loop-back branch matches this pattern:
  //    "if (a != 0) goto loop-entry".
1166 BasicBlock *LoopEntry;
1167 Instruction *DefX2, *CountInst;
1168 Value *VarX1, *VarX0;
1169 PHINode *PhiX, *CountPhi;
1171 DefX2 = CountInst = nullptr;
1172 VarX1 = VarX0 = nullptr;
1173 PhiX = CountPhi = nullptr;
1174 LoopEntry = *(CurLoop->block_begin());
1176 // step 1: Check if the loop-back branch is in desirable form.
1178 if (Value *T = matchCondition(
1179 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1180 DefX2 = dyn_cast<Instruction>(T);
1185 // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
1187 if (!DefX2 || DefX2->getOpcode() != Instruction::And)
1190 BinaryOperator *SubOneOp;
1192 if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
1193 VarX1 = DefX2->getOperand(1);
1195 VarX1 = DefX2->getOperand(0);
1196 SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
1201 Instruction *SubInst = cast<Instruction>(SubOneOp);
1202 ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
1204 !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
1205 (SubInst->getOpcode() == Instruction::Add &&
1206 Dec->isMinusOne()))) {
1211 // step 3: Check the recurrence of variable X
1212 PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
1218 CountInst = nullptr;
1219 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1220 IterE = LoopEntry->end();
1221 Iter != IterE; Iter++) {
1222 Instruction *Inst = &*Iter;
1223 if (Inst->getOpcode() != Instruction::Add)
1226 ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
1227 if (!Inc || !Inc->isOne())
1230 PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
      // Check if the result of the instruction is live out of the loop.
1235 bool LiveOutLoop = false;
1236 for (User *U : Inst->users()) {
1237 if ((cast<Instruction>(U))->getParent() != LoopEntry) {
1254 // step 5: check if the precondition is in this form:
1255 // "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
1257 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1258 Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
  if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
    return false;

  CntInst = CountInst;
  CntPhi = CountPhi;
  Var = T;

  return true;
}
1270 /// Return true if the idiom is detected in the loop.
1273 /// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
1274 /// or nullptr if there is no such.
1275 /// 2) \p CntPhi is set to the corresponding phi node
1276 /// or nullptr if there is no such.
1277 /// 3) \p Var is set to the value whose CTLZ could be used.
1278 /// 4) \p DefX is set to the instruction calculating Loop exit condition.
/// The core idiom we are trying to detect is:
///
///    if (x0 == 0)
///      goto loop-exit          // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x = phi (x0, x.next);      // PhiX
///       cnt = phi (cnt0, cnt.next);
///
///       cnt.next = cnt + 1;
///        ...
///       x.next = x >> 1;           // DefX
///        ...
///    } while(x.next != 0);
///
/// loop-exit:
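///
/// Illustrative example (not part of the original comments): in C this is
///   while (x) { cnt++; x >>= 1; }
/// which runs once per bit position up to the highest set bit, so the trip
/// count is BitWidth(x) - ctlz(x) and the loop can be made countable.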
1297 static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
1298 Instruction *&CntInst, PHINode *&CntPhi,
1299 Instruction *&DefX) {
1300 BasicBlock *LoopEntry;
1301 Value *VarX = nullptr;
1307 LoopEntry = *(CurLoop->block_begin());
1309 // step 1: Check if the loop-back branch is in desirable form.
1310 if (Value *T = matchCondition(
1311 dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
1312 DefX = dyn_cast<Instruction>(T);
1316 // step 2: detect instructions corresponding to "x.next = x >> 1"
1317 if (!DefX || DefX->getOpcode() != Instruction::AShr)
1319 ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
1320 if (!Shft || !Shft->isOne())
1322 VarX = DefX->getOperand(0);
1324 // step 3: Check the recurrence of variable X
1325 PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
1329 // step 4: Find the instruction which count the CTLZ: cnt.next = cnt + 1
1330 // TODO: We can skip the step. If loop trip count is known (CTLZ),
1331 // then all uses of "cnt.next" could be optimized to the trip count
1332 // plus "cnt0". Currently it is not optimized.
1333 // This step could be used to detect POPCNT instruction:
1334 // cnt.next = cnt + (x.next & 1)
1335 for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
1336 IterE = LoopEntry->end();
1337 Iter != IterE; Iter++) {
1338 Instruction *Inst = &*Iter;
    if (Inst->getOpcode() != Instruction::Add)
      continue;

    ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
    if (!Inc || !Inc->isOne())
      continue;

    PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
    if (!Phi)
      continue;

    CntInst = Inst;
    CntPhi = Phi;
    break;
  }
  if (!CntInst)
    return false;

  return true;
}
/// Recognize CTLZ idiom in a non-countable loop and convert the loop
/// to countable (with CTLZ trip count).
/// Returns true if the CTLZ trip count was inserted; otherwise, returns false.
1363 bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
1364 // Give up if the loop has multiple blocks or multiple backedges.
1365 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1368 Instruction *CntInst, *DefX;
1369 PHINode *CntPhi, *PhiX;
1370 if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
1373 bool IsCntPhiUsedOutsideLoop = false;
1374 for (User *U : CntPhi->users())
1375 if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
1376 IsCntPhiUsedOutsideLoop = true;
1379 bool IsCntInstUsedOutsideLoop = false;
1380 for (User *U : CntInst->users())
1381 if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
1382 IsCntInstUsedOutsideLoop = true;
  // If both CntInst and CntPhi are used outside the loop the profitability
  // is questionable.
  if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
    return false;
1390 // For some CPUs result of CTLZ(X) intrinsic is undefined
1391 // when X is 0. If we can not guarantee X != 0, we need to check this
1393 bool ZeroCheck = false;
1394 // It is safe to assume Preheader exist as it was checked in
1395 // parent function RunOnLoop.
1396 BasicBlock *PH = CurLoop->getLoopPreheader();
1397 Value *InitX = PhiX->getIncomingValueForBlock(PH);
1398 // If we check X != 0 before entering the loop we don't need a zero
1399 // check in CTLZ intrinsic, but only if Cnt Phi is not used outside of the
1400 // loop (if it is used we count CTLZ(X >> 1)).
  if (!IsCntPhiUsedOutsideLoop)
    if (BasicBlock *PreCondBB = PH->getSinglePredecessor())
      if (BranchInst *PreCondBr =
              dyn_cast<BranchInst>(PreCondBB->getTerminator())) {
        if (matchCondition(PreCondBr, PH) == InitX)
          ZeroCheck = true;
      }
1409 // Check if CTLZ intrinsic is profitable. Assume it is always profitable
1410 // if we delete the loop (the loop has only 6 instructions):
1411 // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
1412 // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
1413 // %shr = ashr %n.addr.0, 1
1414 // %tobool = icmp eq %shr, 0
1415 // %inc = add nsw %i.0, 1
1418 IRBuilder<> Builder(PH->getTerminator());
1419 SmallVector<const Value *, 2> Ops =
1420 {InitX, ZeroCheck ? Builder.getTrue() : Builder.getFalse()};
1421 ArrayRef<const Value *> Args(Ops);
  if (CurLoop->getHeader()->size() != 6 &&
      TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
          TargetTransformInfo::TCC_Basic)
    return false;

  const DebugLoc DL = DefX->getDebugLoc();
  transformLoopToCountable(PH, CntInst, CntPhi, InitX, DL, ZeroCheck,
                           IsCntPhiUsedOutsideLoop);
  return true;
}
1433 /// Recognizes a population count idiom in a non-countable loop.
1435 /// If detected, transforms the relevant code to issue the popcount intrinsic
1436 /// function call, and returns true; otherwise, returns false.
1437 bool LoopIdiomRecognize::recognizePopcount() {
1438 if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
  // Counting the population is usually done with a few arithmetic
  // instructions. Such instructions can be easily "absorbed" by vacant slots
  // in a non-compact loop. Therefore, recognizing a popcount idiom only makes
  // sense in a compact loop.
1446 // Give up if the loop has multiple blocks or multiple backedges.
1447 if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
1450 BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }
1456 // It should have a preheader containing nothing but an unconditional branch.
1457 BasicBlock *PH = CurLoop->getLoopPreheader();
1458 if (!PH || &PH->front() != PH->getTerminator())
1460 auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
1461 if (!EntryBI || EntryBI->isConditional())
  // It should have a precondition block where the generated popcount intrinsic
  // function can be inserted.
1466 auto *PreCondBB = PH->getSinglePredecessor();
1469 auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1470 if (!PreCondBI || PreCondBI->isUnconditional())
  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}
1483 static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1484 const DebugLoc &DL) {
1485 Value *Ops[] = {Val};
1486 Type *Tys[] = {Val->getType()};
1488 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1489 Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}
1496 static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
1497 const DebugLoc &DL, bool ZeroCheck) {
1498 Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
1499 Type *Tys[] = {Val->getType()};
1501 Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
1502 Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}
/// Transform the following loop:
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   LOOP_BODY
///   Br: loop if (DefX != 0)
/// Use(CntPhi) or Use(CntInst)
///
/// Into:
/// If CntPhi used outside the loop:
///   CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
///   Count = CountPrev + 1
/// else
///   Count = BitWidth(InitX) - CTLZ(InitX)
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   PhiCount = PHI [Count, Dec]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   Dec = PhiCount - 1
///   LOOP_BODY
///   Br: loop if (Dec != 0)
/// Use(CountPrev + Cnt0) // Use(CntPhi)
/// or
/// Use(Count + Cnt0) // Use(CntInst)
///
/// If LOOP_BODY is empty the loop will be deleted.
/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
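///
/// Illustrative example (not part of the original comments): for a 32-bit
/// InitX of 8 (0b1000), ctlz(8) == 28, so Count = 32 - 28 = 4, matching the
/// four iterations of the original loop (x: 8 -> 4 -> 2 -> 1 -> 0).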
1540 void LoopIdiomRecognize::transformLoopToCountable(
1541 BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
1542 const DebugLoc DL, bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
1543 BranchInst *PreheaderBr = dyn_cast<BranchInst>(Preheader->getTerminator());
1545 // Step 1: Insert the CTLZ instruction at the end of the preheader block
1546 // Count = BitWidth - CTLZ(InitX);
1547 // If there are uses of CntPhi create:
1548 // CountPrev = BitWidth - CTLZ(InitX >> 1);
1549 IRBuilder<> Builder(PreheaderBr);
1550 Builder.SetCurrentDebugLocation(DL);
1551 Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;
  if (IsCntPhiUsedOutsideLoop)
    InitXNext = Builder.CreateAShr(InitX,
                                   ConstantInt::get(InitX->getType(), 1));
  else
    InitXNext = InitX;
  CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
  Count = Builder.CreateSub(
      ConstantInt::get(CTLZ->getType(),
                       CTLZ->getType()->getIntegerBitWidth()),
      CTLZ);
  if (IsCntPhiUsedOutsideLoop) {
    CountPrev = Count;
    Count = Builder.CreateAdd(
        CountPrev,
        ConstantInt::get(CountPrev->getType(), 1));
  }
  if (IsCntPhiUsedOutsideLoop)
    NewCount = Builder.CreateZExtOrTrunc(CountPrev,
                                         cast<IntegerType>(CntInst->getType()));
  else
    NewCount = Builder.CreateZExtOrTrunc(Count,
                                         cast<IntegerType>(CntInst->getType()));
1576 // If the CTLZ counter's initial value is not zero, insert Add Inst.
1577 Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
1578 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1579 if (!InitConst || !InitConst->isZero())
1580 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
1582 // Step 2: Insert new IV and loop condition:
1585 // PhiCount = PHI [Count, Dec]
1587 // Dec = PhiCount - 1
1589 // Br: loop if (Dec != 0)
1590 BasicBlock *Body = *(CurLoop->block_begin());
1591 auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
1592 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1593 Type *Ty = Count->getType();
1595 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
1597 Builder.SetInsertPoint(LbCond);
1598 Instruction *TcDec = cast<Instruction>(
1599 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
1600 "tcdec", false, true));
1602 TcPhi->addIncoming(Count, Preheader);
1603 TcPhi->addIncoming(TcDec, Body);
1605 CmpInst::Predicate Pred =
1606 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
1607 LbCond->setPredicate(Pred);
1608 LbCond->setOperand(0, TcDec);
1609 LbCond->setOperand(1, ConstantInt::get(Ty, 0));
1611 // Step 3: All the references to the original counter outside
1612 // the loop are replaced with the NewCount -- the value returned from
1613 // __builtin_ctlz(x).
  if (IsCntPhiUsedOutsideLoop)
    CntPhi->replaceUsesOutsideBlock(NewCount, Body);
  else
    CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // step 4: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}
1624 void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
1625 Instruction *CntInst,
1626 PHINode *CntPhi, Value *Var) {
1627 BasicBlock *PreHead = CurLoop->getLoopPreheader();
1628 auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
1629 const DebugLoc DL = CntInst->getDebugLoc();
1631 // Assuming before transformation, the loop is following:
1632 // if (x) // the precondition
1633 // do { cnt++; x &= x - 1; } while(x);
1635 // Step 1: Insert the ctpop instruction at the end of the precondition block
1636 IRBuilder<> Builder(PreCondBr);
1637 Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
1639 PopCnt = createPopcntIntrinsic(Builder, Var, DL);
1640 NewCount = PopCntZext =
1641 Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
1643 if (NewCount != PopCnt)
1644 (cast<Instruction>(NewCount))->setDebugLoc(DL);
  // TripCnt is exactly the number of iterations the loop has
  TripCnt = NewCount;
1649 // If the population counter's initial value is not zero, insert Add Inst.
1650 Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
1651 ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
1652 if (!InitConst || !InitConst->isZero()) {
1653 NewCount = Builder.CreateAdd(NewCount, CntInitVal);
    (cast<Instruction>(NewCount))->setDebugLoc(DL);
  }
  // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
  //   "if (NewCount == 0) loop-exit". Without this change, the intrinsic
  //   function would be partially dead code, and downstream passes will drag
  //   it back from the precondition block to the preheader.
1663 ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
1665 Value *Opnd0 = PopCntZext;
1666 Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
1667 if (PreCond->getOperand(0) != Var)
1668 std::swap(Opnd0, Opnd1);
1670 ICmpInst *NewPreCond = cast<ICmpInst>(
1671 Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
1672 PreCondBr->setCondition(NewPreCond);
1674 RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a noncountable
  // loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is a lot easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite loop
  //    isn't dead even if it computes nothing useful. In general, DCE needs
  //    to prove a noncountable loop finite before it can safely delete it.)
  //
1687 // - If the loop also performs something else, it remains alive.
1688 // Since it is transformed to countable form, it can be aggressively
1689 // optimized by some optimizations which are in general not applicable
1690 // to a noncountable loop.
  // After this step, this loop (conceptually) would look like the following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x-1; t--; } while (t > 0);
1697 BasicBlock *Body = *(CurLoop->block_begin());
1699 auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
1700 ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
1701 Type *Ty = TripCnt->getType();
1703 PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
1705 Builder.SetInsertPoint(LbCond);
1706 Instruction *TcDec = cast<Instruction>(
1707 Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
1708 "tcdec", false, true));
1710 TcPhi->addIncoming(TripCnt, PreHead);
1711 TcPhi->addIncoming(TcDec, Body);
1713 CmpInst::Predicate Pred =
1714 (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
1715 LbCond->setPredicate(Pred);
1716 LbCond->setOperand(0, TcDec);
1717 LbCond->setOperand(1, ConstantInt::get(Ty, 0));
1720 // Step 4: All the references to the original population counter outside
1721 // the loop are replaced with the NewCount -- the value returned from
1722 // __builtin_ctpop().
1723 CntInst->replaceUsesOutsideBlock(NewCount, Body);
1725 // step 5: Forget the "non-computable" trip-count SCEV associated with the
1726 // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}