//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
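// For illustration, the simplest cases handled below are loops like
//
//   for (i = 0; i < n; ++i) p[i] = 0;      // becomes memset(p, 0, n)
//   for (i = 0; i < n; ++i) p[i] = q[i];   // becomes memcpy(p, q, n)
//
// assuming TargetLibraryInfo reports the corresponding library routine as
// available (see runOnLoop below).
//
//===----------------------------------------------------------------------===//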
#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

static cl::opt<bool> UseLIRCodeSizeHeurs(
    "use-lir-code-size-heurs",
    cl::desc("Use loop idiom recognition code size heuristics when compiling "
             "with -Os/-Oz"),
    cl::init(true), cl::Hidden);
namespace {

class LoopIdiomRecognize {
  Loop *CurLoop = nullptr;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;
  OptimizationRemarkEmitter &ORE;
  bool ApplyCodeSizeHeuristics;
  std::unique_ptr<MemorySSAUpdater> MSSAU;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI, MemorySSA *MSSA,
                              const DataLayout *DL,
                              OptimizationRemarkEmitter &ORE)
      : AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL), ORE(ORE) {
    if (MSSA)
      MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
  }

  bool runOnLoop(Loop *L);
private:
  using StoreList = SmallVector<StoreInst *, 8>;
  using StoreListMap = MapVector<Value *, StoreList>;

  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;
  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// Return code for isLegalStore()
  enum LegalStoreKind {
    None = 0,
    Memset,
    MemsetPattern,
    Memcpy,
    UnorderedAtomicMemcpy,
    DontUse // Dummy retval never to be used. Allows catching errors in retval
            // handling.
  };
  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  LegalStoreKind isLegalStore(StoreInst *SI);
  enum class ForMemset { No, Yes };
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         ForMemset For);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               MaybeAlign StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);
  bool recognizeAndInsertFFS(); ///< Find First Set: ctlz or cttz
  void transformLoopToCountable(Intrinsic::ID IntrinID, BasicBlock *PreCondBB,
                                Instruction *CntInst, PHINode *CntPhi,
                                Value *Var, Instruction *DefX,
                                const DebugLoc &DL, bool ZeroCheck,
                                bool IsCntPhiUsedOutsideLoop);

  /// @}
};
class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;

  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent());
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
    auto *MSSAAnalysis = getAnalysisIfAvailable<MemorySSAWrapperPass>();
    MemorySSA *MSSA = nullptr;
    if (MSSAAnalysis)
      MSSA = &MSSAAnalysis->getMSSA();

    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, MSSA, DL, ORE);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // end anonymous namespace
char LoopIdiomRecognizeLegacyPass::ID = 0;

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  const auto *DL = &L.getHeader()->getModule()->getDataLayout();

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI,
                         AR.MSSA, DL, ORE);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }
static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
//          Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//
bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}
bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable"
         " backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Countable Loop %" << CurLoop->getHeader()->getName()
                    << "\n");

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop has instructions that may throw.
  SimpleLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(CurLoop);
  if (SafetyInfo.anyBlockMayThrow())
    return false;

  bool MadeChange = false;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }
  return MadeChange;
}
static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}
/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
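///
/// For example (little-endian only, as checked below), a strided store of
/// i32 0x01020304 yields a [4 x i32] pattern array, i.e. the byte sequence
/// { 0x04, 0x03, 0x02, 0x01 } repeated four times to fill 16 bytes.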
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // FIXME: This could check for UndefValue because it can be merged into any
  // other valid pattern.

  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}
LoopIdiomRecognize::LegalStoreKind
LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
  // Don't touch volatile stores.
  if (SI->isVolatile())
    return LegalStoreKind::None;
  // We only want simple or unordered-atomic stores.
  if (!SI->isUnordered())
    return LegalStoreKind::None;

  // Don't convert stores of non-integral pointer types to memsets (which
  // stores integers).
  if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
    return LegalStoreKind::None;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return LegalStoreKind::None;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return LegalStoreKind::None;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return LegalStoreKind::None;

  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return LegalStoreKind::None;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  // Note: memset and memset_pattern on unordered-atomic is not yet supported.
  bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (!UnorderedAtomic && HasMemset && SplatValue &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    return LegalStoreKind::Memset;
  } else if (!UnorderedAtomic && HasMemsetPattern &&
             // Don't create memset_pattern16s with address spaces.
             StorePtr->getType()->getPointerAddressSpace() == 0 &&
             (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    // It looks like we can use PatternValue!
    return LegalStoreKind::MemsetPattern;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
    // Check to see if the stride matches the size of the store. If so, then
    // we know that every byte is touched in the loop.
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
    if (StoreSize != Stride && StoreSize != -Stride)
      return LegalStoreKind::None;

    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());

    // Only allow non-volatile loads.
    if (!LI || LI->isVolatile())
      return LegalStoreKind::None;
    // Only allow simple or unordered-atomic loads.
    if (!LI->isUnordered())
      return LegalStoreKind::None;

    // See if the pointer expression is an AddRec like {base,+,1} on the
    // current loop, which indicates a strided load. If we have something
    // else, it's a random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return LegalStoreKind::None;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return LegalStoreKind::None;

    // Success. This store can be converted into a memcpy.
    UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
    return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
                           : LegalStoreKind::Memcpy;
  }
  // This store can't be transformed into a memset/memcpy.
  return LegalStoreKind::None;
}
void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    // Make sure this is a strided store with a constant stride.
    switch (isLegalStore(SI)) {
    case LegalStoreKind::None:
      // Nothing to do.
      continue;
    case LegalStoreKind::Memset: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemset[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::MemsetPattern: {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } break;
    case LegalStoreKind::Memcpy:
    case LegalStoreKind::UnorderedAtomicMemcpy:
      StoreRefsForMemcpy.push_back(SI);
      break;
    default:
      assert(false && "unhandled return value");
      break;
    }
  }
}
/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops, as in the sketch below.
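  //
  //   for (i = 0; i < n; i += 2) { p[i] = 0; p[i+1] = 0; }
  //
  // Here each store alone strides by two elements, but the consecutive pair
  // covers every byte, so together they can still become one memset.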
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::Yes);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, ForMemset::No);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakTrackingVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
    }
  }

  return MadeChange;
}
/// See if the given stores can be promoted to a memset.
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount, ForMemset For) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize =
        DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (For == ForMemset::Yes)
      FirstSplatValue = isBytewiseValue(FirstStoredVal, *DL);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance to find a memset opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);
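    // E.g. with i == 3 and e == 6, IndexQueue is {4, 5, 2, 1, 0}: the nearest
    // successors first, then the predecessors walking backwards.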
    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (For == ForMemset::Yes)
        SecondSplatValue = isBytewiseValue(SecondStoredVal, *DL);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (For == ForMemset::Yes) {
          if (isa<UndefValue>(FirstSplatValue))
            FirstSplatValue = SecondSplatValue;
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (isa<UndefValue>(FirstPatternValue))
            FirstPatternValue = SecondPatternValue;
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we transformed so that we don't visit the same store twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize,
                                MaybeAlign(HeadStore->getAlignment()),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}
/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(
      Pointer, (unsigned)SizeInBytes, MaybeAlign(MSI->getDestAlignment()),
      SplatValue, MSI, MSIs, Ev, BECount, NegStride, /*IsLoopMemset=*/true);
}
/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location
  // starts at the pointer and has infinite size.
  LocationSize AccessSize = LocationSize::unknown();

  // If the loop iterates a fixed number of times, we can refine the access
  // size to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = LocationSize::precise((BECst->getValue()->getZExtValue() + 1) *
                                       StoreSize);

  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. Store to &A[i] of 100 will always return may alias
  // with store of &A[100]; we need StoreLoc to be "A" with size of 100,
  // which will then no-alias a store to &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          isModOrRefSet(
              intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
        return true;

  return false;
}
// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset. Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
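//
// For example, for a loop that stores StoreSize bytes to p[i] for
// i = n-1, ..., 0, the addrec starts at &p[n-1] and BECount is n-1, so the
// base we want is &p[n-1] - (n-1)*StoreSize == &p[0].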
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}
/// Compute the number of bytes as a SCEV from the backedge taken count.
///
/// This also maps the SCEV into the provided type and tries to handle the
/// computation in a way that will fold cleanly.
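///
/// That is, it builds (BECount + 1) * StoreSize, preferring to do the +1
/// before the zero extension when the loop guard proves BECount != -1, so
/// the add can carry the NUW flag and simplify better.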
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
                               unsigned StoreSize, Loop *CurLoop,
                               const DataLayout *DL, ScalarEvolution *SE) {
  const SCEV *NumBytesS;
  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  //
  // If we're going to need to zero extend the BE count, check if we can add
  // one to it prior to zero extending without overflow. Provided this is safe,
  // it allows better simplification of the +1.
  if (DL->getTypeSizeInBits(BECount->getType()) <
          DL->getTypeSizeInBits(IntPtr) &&
      SE->isLoopEntryGuardedByCond(
          CurLoop, ICmpInst::ICMP_NE, BECount,
          SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
    NumBytesS = SE->getZeroExtendExpr(
        SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
        IntPtr);
  } else {
    NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
                               SE->getOne(IntPtr), SCEV::FlagNUW);
  }

  // And scale it based on the store size.
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }
  return NumBytesS;
}
/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
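///
/// E.g. "for (i) p[i] = 0" becomes a single memset call in the preheader,
/// while a non-splat constant such as "for (i) p[i] = 0x01020304" can still
/// become a memset_pattern16 call on targets whose library provides it.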
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, MaybeAlign StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
  Value *SplatValue = isBytewiseValue(StoredVal, *DL);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header. This allows us to insert code for it in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntIdxTy = DL->getIndexType(DestPtr->getType());

  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntIdxTy, StoreSize, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(Start, *SE))
    return false;

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
  if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
    return false;
  }

  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return false;

  // Okay, everything looks good, insert the memset.

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  // TODO: ideally we should still be able to generate memset if SCEV expander
  // is taught to generate the dependencies at the latest point.
  if (!isSafeToExpand(NumBytesS, *SE))
    return false;

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall = Builder.CreateMemSet(BasePtr, SplatValue, NumBytes,
                                   MaybeAlign(StoreAlignment));
  } else {
    // Everything is emitted in default address space.
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    StringRef FuncName = "memset_pattern16";
    FunctionCallee MSP = M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
                                                Int8PtrTy, Int8PtrTy, IntIdxTy);
    inferLibFuncAttributes(M, FuncName, *TLI);

    // Otherwise we should form a memset_pattern16. PatternValue is known to be
    // a constant array of 16 bytes. Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(Align(16));
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
                    << "    from store to: " << *Ev << " at: " << *TheStore
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStridedStore",
                              NewCall->getDebugLoc(), Preheader)
           << "Transformed loop-strided store into a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() function";
  });

  // Okay, the memset has been formed. Zap the original store and anything that
  // feeds into it.
  for (auto *I : Stores) {
    if (MSSAU)
      MSSAU->removeMemoryAccess(I, true);
    deleteDeadInstruction(I);
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  ++NumMemSet;
  return true;
}
class ExpandedValuesCleaner {
  SCEVExpander &Expander;
  TargetLibraryInfo *TLI;
  SmallVector<Value *, 4> ExpandedValues;
  bool Commit = false;

public:
  ExpandedValuesCleaner(SCEVExpander &Expander, TargetLibraryInfo *TLI)
      : Expander(Expander), TLI(TLI) {}

  void add(Value *V) { ExpandedValues.push_back(V); }

  void commit() { Commit = true; }

  ~ExpandedValuesCleaner() {
    if (!Commit) {
      Expander.clear();
      for (auto *V : ExpandedValues)
        RecursivelyDeleteTriviallyDeadInstructions(V, TLI);
    }
  }
};
/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
///   for (i) A[i] = B[i];
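///
/// which, once the aliasing checks below pass, is emitted in the preheader
/// roughly as memcpy(&A[first], &B[first], (BECount + 1) * StoreSize).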
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  APInt Stride = getStoreStride(StoreEv);
  unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
  bool NegStride = StoreSize == -Stride;

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  const SCEVAddRecExpr *LoadEv =
      cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));

  // The trip count of the loop and the base pointer of the addrec SCEV is
  // guaranteed to be loop invariant, which means that it should dominate the
  // header. This allows us to insert code for it in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  ExpandedValuesCleaner EVC(Expander, TLI);

  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = SI->getPointerAddressSpace();
  Type *IntIdxTy = Builder.getIntNTy(DL->getIndexSizeInBits(StrAS));

  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntIdxTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
  EVC.add(StoreBasePtr);

  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
  if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores))
    return false;

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = LI->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntIdxTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
  EVC.add(LoadBasePtr);

  if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
                            StoreSize, *AA, Stores))
    return false;

  if (avoidLIRForMultiBlockLoop())
    return false;

  // Okay, everything is safe, we can transform this!

  const SCEV *NumBytesS =
      getNumBytes(BECount, IntIdxTy, StoreSize, CurLoop, DL, SE);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntIdxTy, Preheader->getTerminator());

  CallInst *NewCall = nullptr;
  // Check whether to generate an unordered atomic memcpy:
  //  If the load or store are atomic, then they must necessarily be unordered
  //  by previous checks.
  if (!SI->isAtomic() && !LI->isAtomic())
    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
                                   LI->getAlign(), NumBytes);
  else {
    // We cannot allow unaligned ops for unordered load/store, so reject
    // anything where the alignment isn't at least the element size.
    const Align StoreAlign = SI->getAlign();
    const Align LoadAlign = LI->getAlign();
    if (StoreAlign < StoreSize || LoadAlign < StoreSize)
      return false;

    // If the element.atomic memcpy is not lowered into explicit
    // loads/stores later, then it will be lowered into an element-size
    // specific lib call. If the lib call doesn't exist for our store size, then
    // we shouldn't generate the memcpy.
    if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
      return false;

    // Create the call.
    // Note that unordered atomic loads/stores are *required* by the spec to
    // have an alignment but non-atomic loads/stores may not.
    NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
        StoreBasePtr, StoreAlign, LoadBasePtr, LoadAlign, NumBytes,
        StoreSize);
  }
  NewCall->setDebugLoc(SI->getDebugLoc());

  if (MSSAU) {
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        NewCall, nullptr, NewCall->getParent(), MemorySSA::BeforeTerminator);
    MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
  }

  LLVM_DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
                    << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
                    << "    from store ptr=" << *StoreEv << " at: " << *SI
                    << "\n");

  ORE.emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "ProcessLoopStoreOfLoopLoad",
                              NewCall->getDebugLoc(), Preheader)
           << "Formed a call to "
           << ore::NV("NewFunction", NewCall->getCalledFunction())
           << "() function";
  });

  // Okay, the memcpy has been formed. Zap the original store and anything that
  // feeds into it.
  if (MSSAU)
    MSSAU->removeMemoryAccess(SI, true);
  deleteDeadInstruction(SI);
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  ++NumMemCpy;
  EVC.commit();
  return true;
}
// When compiling for code size we avoid idiom recognition for a multi-block
// loop unless it is a loop_memset idiom or a memset/memcpy idiom in a nested
// loop.
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
                                                   bool IsLoopMemset) {
  if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
    if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
      LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
                        << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
                        << " avoided: multi-block top-level loop\n");
      return true;
    }
  }

  return false;
}
bool LoopIdiomRecognize::runOnNoncountableLoop() {
  LLVM_DEBUG(dbgs() << DEBUG_TYPE " Scanning: F["
                    << CurLoop->getHeader()->getParent()->getName()
                    << "] Noncountable Loop %"
                    << CurLoop->getHeader()->getName() << "\n");

  return recognizePopcount() || recognizeAndInsertFFS();
}
/// Check if the given conditional branch is based on a comparison between
/// a variable and zero, and if the variable is non-zero (or zero, when
/// JmpOnZero is true) the control yields to the loop entry. If the branch
/// matches the behavior, the variable involved in the comparison is returned.
/// This function will be called to see if the precondition and postcondition
/// of the loop are in desirable form.
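///
/// For example, with JmpOnZero == false this matches
///   br (icmp ne X, 0), LoopEntry, SomewhereElse
/// (or the inverted icmp eq form with the successors swapped) and returns X.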
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry,
                             bool JmpOnZero = false) {
  if (!BI || !BI->isConditional())
    return nullptr;

  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return nullptr;

  ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!CmpZero || !CmpZero->isZero())
    return nullptr;

  BasicBlock *TrueSucc = BI->getSuccessor(0);
  BasicBlock *FalseSucc = BI->getSuccessor(1);
  if (JmpOnZero)
    std::swap(TrueSucc, FalseSucc);

  ICmpInst::Predicate Pred = Cond->getPredicate();
  if ((Pred == ICmpInst::ICMP_NE && TrueSucc == LoopEntry) ||
      (Pred == ICmpInst::ICMP_EQ && FalseSucc == LoopEntry))
    return Cond->getOperand(0);

  return nullptr;
}
// Check if the recurrence variable `VarX` is in the right form to create
// the idiom. Returns the value coerced to a PHINode if so.
static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
                                 BasicBlock *LoopEntry) {
  auto *PhiX = dyn_cast<PHINode>(VarX);
  if (PhiX && PhiX->getParent() == LoopEntry &&
      (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
    return PhiX;
  return nullptr;
}
/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the population bit.
/// 2) \p CntPhi is set to the corresponding phi node.
/// 3) \p Var is set to the value whose population bits are being counted.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x1 = phi (x0, x2);
///       cnt1 = phi(cnt0, cnt2);
///
///       cnt2 = cnt1 + 1;
///        ...
///       x2 = x1 & (x1 - 1);
///        ...
///    } while(x != 0);
///
/// loop-exit:
/// \endcode
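///
/// In C this is the classic Kernighan popcount loop, e.g.
///   do { cnt++; x &= x - 1; } while (x);
/// which transformLoopToPopcount below replaces with a single @llvm.ctpop
/// call plus a little fix-up arithmetic.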
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
                                Instruction *&CntInst, PHINode *&CntPhi,
                                Value *&Var) {
  // step 1: Check to see if the look-back branch matches this pattern:
  //    "if (a!=0) goto loop-entry".
  BasicBlock *LoopEntry;
  Instruction *DefX2, *CountInst;
  Value *VarX1, *VarX0;
  PHINode *PhiX, *CountPhi;

  DefX2 = CountInst = nullptr;
  VarX1 = VarX0 = nullptr;
  PhiX = CountPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  {
    if (Value *T = matchCondition(
            dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
      DefX2 = dyn_cast<Instruction>(T);
    else
      return false;
  }

  // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
  {
    if (!DefX2 || DefX2->getOpcode() != Instruction::And)
      return false;

    BinaryOperator *SubOneOp;

    if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
      VarX1 = DefX2->getOperand(1);
    else {
      VarX1 = DefX2->getOperand(0);
      SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    }
    if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
      return false;

    ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
    if (!Dec ||
        !((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
          (SubOneOp->getOpcode() == Instruction::Add &&
           Dec->isMinusOne()))) {
      return false;
    }
  }

  // step 3: Check the recurrence of variable X
  PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
  if (!PhiX)
    return false;

  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
  {
    CountInst = nullptr;
    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                              IterE = LoopEntry->end();
         Iter != IterE; Iter++) {
      Instruction *Inst = &*Iter;
      if (Inst->getOpcode() != Instruction::Add)
        continue;

      ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
      if (!Inc || !Inc->isOne())
        continue;

      PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
      if (!Phi)
        continue;

      // Check if the result of the instruction is live out of the loop.
      bool LiveOutLoop = false;
      for (User *U : Inst->users()) {
        if ((cast<Instruction>(U))->getParent() != LoopEntry) {
          LiveOutLoop = true;
          break;
        }
      }

      if (LiveOutLoop) {
        CountInst = Inst;
        CountPhi = Phi;
        break;
      }
    }

    if (!CountInst)
      return false;
  }

  // step 5: check if the precondition is in this form:
  //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
  {
    auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
      return false;

    CntInst = CountInst;
    CntPhi = CountPhi;
    Var = T;
  }

  return true;
}
/// Return true if the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
///    or nullptr if there is no such.
/// 2) \p CntPhi is set to the corresponding phi node
///    or nullptr if there is no such.
/// 3) \p Var is set to the value whose CTLZ could be used.
/// 4) \p DefX is set to the instruction calculating the loop exit condition.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x = phi (x0, x.next);   //PhiX
///       cnt = phi(cnt0, cnt.next);
///
///       cnt.next = cnt + 1;
///        ...
///       x.next = x >> 1;   // DefX
///        ...
///    } while(x.next != 0);
///
/// loop-exit:
/// \endcode
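///
/// In C this is, e.g., "do { cnt++; x >>= 1; } while (x);", whose final
/// count is cnt0 + bitwidth(x0) - ctlz(x0); transformLoopToCountable below
/// uses that to make the trip count computable before the loop runs.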
static bool detectShiftUntilZeroIdiom(Loop *CurLoop, const DataLayout &DL,
                                      Intrinsic::ID &IntrinID, Value *&InitX,
                                      Instruction *&CntInst, PHINode *&CntPhi,
                                      Instruction *&DefX) {
  BasicBlock *LoopEntry;
  Value *VarX = nullptr;

  DefX = nullptr;
  CntInst = nullptr;
  CntPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  if (Value *T = matchCondition(
          dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
    DefX = dyn_cast<Instruction>(T);
  else
    return false;

  // step 2: detect instructions corresponding to "x.next = x >> 1 or x << 1"
  if (!DefX || !DefX->isShift())
    return false;
  IntrinID = DefX->getOpcode() == Instruction::Shl ? Intrinsic::cttz :
                                                     Intrinsic::ctlz;
  ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
  if (!Shft || !Shft->isOne())
    return false;
  VarX = DefX->getOperand(0);

  // step 3: Check the recurrence of variable X
  PHINode *PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
  if (!PhiX)
    return false;

  InitX = PhiX->getIncomingValueForBlock(CurLoop->getLoopPreheader());

  // Make sure the initial value can't be negative; otherwise the ashr in the
  // loop might never reach zero, which would make the loop infinite.
  if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, DL))
    return false;

  // step 4: Find the instruction which counts the CTLZ: cnt.next = cnt + 1
  // TODO: We can skip the step. If loop trip count is known (CTLZ),
  //       then all uses of "cnt.next" could be optimized to the trip count
  //       plus "cnt0". Currently it is not optimized.
  //       This step could be used to detect POPCNT instruction:
  //       cnt.next = cnt + (x.next & 1)
  for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                            IterE = LoopEntry->end();
       Iter != IterE; Iter++) {
    Instruction *Inst = &*Iter;
    if (Inst->getOpcode() != Instruction::Add)
      continue;

    ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
    if (!Inc || !Inc->isOne())
      continue;

    PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
    if (!Phi)
      continue;

    CntInst = Inst;
    CntPhi = Phi;
    break;
  }
  if (!CntInst)
    return false;

  return true;
}
/// Recognize a CTLZ or CTTZ idiom in a non-countable loop and convert the
/// loop to countable (with a CTLZ / CTTZ trip count). Returns true if a
/// CTLZ / CTTZ-based trip count was inserted; otherwise, returns false.
bool LoopIdiomRecognize::recognizeAndInsertFFS() {
  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  Intrinsic::ID IntrinID;
  Value *InitX;
  Instruction *DefX = nullptr;
  PHINode *CntPhi = nullptr;
  Instruction *CntInst = nullptr;
  // Help decide if transformation is profitable. For ShiftUntilZero idiom,
  // this is always 6.
  size_t IdiomCanonicalSize = 6;

  if (!detectShiftUntilZeroIdiom(CurLoop, *DL, IntrinID, InitX,
                                 CntInst, CntPhi, DefX))
    return false;

  bool IsCntPhiUsedOutsideLoop = false;
  for (User *U : CntPhi->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntPhiUsedOutsideLoop = true;
      break;
    }
  bool IsCntInstUsedOutsideLoop = false;
  for (User *U : CntInst->users())
    if (!CurLoop->contains(cast<Instruction>(U))) {
      IsCntInstUsedOutsideLoop = true;
      break;
    }
  // If both CntInst and CntPhi are used outside the loop the profitability
  // is questionable.
  if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
    return false;

  // For some CPUs the result of the CTLZ(X) intrinsic is undefined when X is
  // 0. If we cannot guarantee X != 0, we need to check this when expanding.
  bool ZeroCheck = false;
  // It is safe to assume Preheader exist as it was checked in
  // parent function RunOnLoop.
  BasicBlock *PH = CurLoop->getLoopPreheader();

  // If we are using the count instruction outside the loop, make sure we
  // have a zero check as a precondition. Without the check the loop would run
  // one iteration before any check of the input value. This means 0 and 1
  // would have identical behavior in the original loop, so we could not
  // distinguish them from the count alone.
  if (!IsCntPhiUsedOutsideLoop) {
    auto *PreCondBB = PH->getSinglePredecessor();
    if (!PreCondBB)
      return false;
    auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    if (!PreCondBI)
      return false;
    if (matchCondition(PreCondBI, PH) != InitX)
      return false;
    ZeroCheck = true;
  }

  // Check if the CTLZ / CTTZ intrinsic is profitable. Assume it is always
  // profitable if we delete the loop.

  // the loop has only 6 instructions:
  //  %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
  //  %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
  //  %shr = ashr %n.addr.0, 1
  //  %tobool = icmp eq %shr, 0
  //  %inc = add nsw %i.0, 1
  //  br i1 %tobool

  const Value *Args[] = {
      InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
                       : ConstantInt::getFalse(InitX->getContext())};

  // @llvm.dbg doesn't count as they have no semantic effect.
  auto InstWithoutDebugIt = CurLoop->getHeader()->instructionsWithoutDebug();
  uint32_t HeaderSize =
      std::distance(InstWithoutDebugIt.begin(), InstWithoutDebugIt.end());

  IntrinsicCostAttributes Attrs(IntrinID, InitX->getType(), Args);
  int Cost =
      TTI->getIntrinsicInstrCost(Attrs, TargetTransformInfo::TCK_SizeAndLatency);
  if (HeaderSize != IdiomCanonicalSize &&
      Cost > TargetTransformInfo::TCC_Basic)
    return false;

  transformLoopToCountable(IntrinID, PH, CntInst, CntPhi, InitX, DefX,
                           DefX->getDebugLoc(), ZeroCheck,
                           IsCntPhiUsedOutsideLoop);
  return true;
}
/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Counting the population is usually done with a few arithmetic
  // instructions. Such instructions can be easily "absorbed" by vacant slots
  // in a non-compact loop. Therefore, recognizing the popcount idiom only
  // makes sense in a compact loop.

  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }

  // It should have a preheader containing nothing but an unconditional branch.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  if (!PH || &PH->front() != PH->getTerminator())
    return false;
  auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
  if (!EntryBI || EntryBI->isConditional())
    return false;

  // It should have a precondition block where the generated popcount intrinsic
  // function can be inserted.
  auto *PreCondBB = PH->getSinglePredecessor();
  if (!PreCondBB)
    return false;
  auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  if (!PreCondBI || PreCondBI->isUnconditional())
    return false;

  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}
static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                       const DebugLoc &DL) {
  Value *Ops[] = {Val};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}
static CallInst *createFFSIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                    const DebugLoc &DL, bool ZeroCheck,
                                    Intrinsic::ID IID) {
  Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Function *Func = Intrinsic::getDeclaration(M, IID, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}
/// Transform the following loop (Using CTLZ, CTTZ is similar):
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   LOOP_BODY
///   Br: loop if (DefX != 0)
/// Use(CntPhi) or Use(CntInst)
///
/// Into:
/// If CntPhi used outside the loop:
///   CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
///   Count = CountPrev + 1
/// else
///   Count = BitWidth(InitX) - CTLZ(InitX)
/// loop:
///   CntPhi = PHI [Cnt0, CntInst]
///   PhiX = PHI [InitX, DefX]
///   PhiCount = PHI [Count, Dec]
///   CntInst = CntPhi + 1
///   DefX = PhiX >> 1
///   Dec = PhiCount - 1
///   LOOP_BODY
///   Br: loop if (Dec != 0)
/// Use(CountPrev + Cnt0) // Use(CntPhi)
/// or
/// Use(Count + Cnt0) // Use(CntInst)
///
/// If LOOP_BODY is empty the loop will be deleted.
/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
void LoopIdiomRecognize::transformLoopToCountable(
    Intrinsic::ID IntrinID, BasicBlock *Preheader, Instruction *CntInst,
    PHINode *CntPhi, Value *InitX, Instruction *DefX, const DebugLoc &DL,
    bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
  BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());

  // Step 1: Insert the CTLZ/CTTZ instruction at the end of the preheader block
  IRBuilder<> Builder(PreheaderBr);
  Builder.SetCurrentDebugLocation(DL);
  Value *FFS, *Count, *CountPrev, *NewCount, *InitXNext;

  // Count = BitWidth - CTLZ(InitX);
  // If there are uses of CntPhi create:
  //   CountPrev = BitWidth - CTLZ(InitX >> 1);
  //   Count = CountPrev + 1;
  if (IsCntPhiUsedOutsideLoop) {
    if (DefX->getOpcode() == Instruction::AShr)
      InitXNext =
          Builder.CreateAShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::LShr)
      InitXNext =
          Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
    else if (DefX->getOpcode() == Instruction::Shl) // cttz
      InitXNext =
          Builder.CreateShl(InitX, ConstantInt::get(InitX->getType(), 1));
    else
      llvm_unreachable("Unexpected opcode!");
  } else
    InitXNext = InitX;

  FFS = createFFSIntrinsic(Builder, InitXNext, DL, ZeroCheck, IntrinID);
  Count = Builder.CreateSub(
      ConstantInt::get(FFS->getType(), FFS->getType()->getIntegerBitWidth()),
      FFS);
  if (IsCntPhiUsedOutsideLoop) {
    CountPrev = Count;
    Count = Builder.CreateAdd(CountPrev,
                              ConstantInt::get(CountPrev->getType(), 1));
  }

  NewCount = Builder.CreateZExtOrTrunc(
      IsCntPhiUsedOutsideLoop ? CountPrev : Count,
      cast<IntegerType>(CntInst->getType()));

  // If the counter's initial value is not zero, insert an add instruction.
  Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
  ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
  if (!InitConst || !InitConst->isZero())
    NewCount = Builder.CreateAdd(NewCount, CntInitVal);

  // Step 2: Insert new IV and loop condition:
  // loop:
  //   ...
  //   PhiCount = PHI [Count, Dec]
  //   ...
  //   Dec = PhiCount - 1
  //   ...
  //   Br: loop if (Dec != 0)
  BasicBlock *Body = *(CurLoop->block_begin());
  auto *LbBr = cast<BranchInst>(Body->getTerminator());
  ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
  Type *Ty = Count->getType();

  PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

  Builder.SetInsertPoint(LbCond);
  Instruction *TcDec = cast<Instruction>(
      Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1), "tcdec", false, true));

  TcPhi->addIncoming(Count, Preheader);
  TcPhi->addIncoming(TcDec, Body);

  CmpInst::Predicate Pred =
      (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  LbCond->setPredicate(Pred);
  LbCond->setOperand(0, TcDec);
  LbCond->setOperand(1, ConstantInt::get(Ty, 0));
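
  // At this point the rewritten latch conceptually has the form (SSA names
  // illustrative, assuming an i32 trip count and a loop-back branch on
  // successor 0):
  //   %tcphi = phi i32 [ %count, %preheader ], [ %tcdec, %body ]
  //   %tcdec = sub nsw i32 %tcphi, 1
  //   %cond  = icmp ne i32 %tcdec, 0
  //   br i1 %cond, label %body, label %exit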

  // Step 3: All the references to the original counter outside the loop are
  // replaced with NewCount.
  if (IsCntPhiUsedOutsideLoop)
    CntPhi->replaceUsesOutsideBlock(NewCount, Body);
  else
    CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 4: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}

void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc &DL = CntInst->getDebugLoc();

  // Before the transformation, the loop is assumed to look like:
  //   if (x) // the precondition
  //     do { cnt++; x &= x - 1; } while (x);
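  //
  // For instance, for x = 0b1101 the loop executes three iterations
  // (x: 1101 -> 1100 -> 1000 -> 0000), and cnt advances by exactly
  // popcount(x) = 3.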

  // Step 1: Insert the ctpop instruction at the end of the precondition block
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    NewCount = PopCntZext =
        Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop executes.
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert an add
    // instruction.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }

  // Step 2: Replace the precondition "if (x == 0) goto loop-exit" with
  // "if (NewCount == 0) goto loop-exit". Without this change, the intrinsic
  // call would be partially dead code, and downstream passes would drag it
  // back from the precondition block into the preheader. The rewrite is valid
  // because ctpop(x) == 0 exactly when x == 0.
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }
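
  // For example, an original precondition "icmp eq i32 %x, 0" becomes
  // "icmp eq i32 %popcnt.zext, 0" (SSA names illustrative): the predicate is
  // reused, and the operand swap above keeps the zero on the same side of
  // the compare as in the original.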

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a noncountable
  // loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is much easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite loop
  //    isn't dead even if it computes nothing useful. In general, DCE needs
  //    to prove a noncountable loop finite before it can safely delete it.)
  //
  //  - If the loop also performs something else, it remains alive. Since it
  //    is transformed into a countable form, it can be aggressively optimized
  //    by transformations that are in general not applicable to noncountable
  //    loops.
  //
  // After this step, this loop (conceptually) would look like the following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x - 1; t--; } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1), "tcdec", false,
                          true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT
                                        : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  // the loop are replaced with NewCount -- the value returned from
  // __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 5: Forget the "non-computable" trip-count SCEV associated with the
  // loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}