//===-- LoopIdiomRecognize.cpp - Loop idiom recognition -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases that this kicks in, it can be a significant
// performance win.
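//
// For example (an illustrative sketch, not drawn from a particular test), a
// loop such as
//
//   for (i = 0; i < n; ++i)
//     A[i] = 0;
//
// is rewritten into a single library call:
//
//   memset(A, 0, n * sizeof(A[0]));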
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
//   memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
//   fpowi
// Future integer operation idioms to recognize:
//   ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;

#define DEBUG_TYPE "loop-idiom"

STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");

static cl::opt<bool> UseLIRCodeSizeHeurs(
    "use-lir-code-size-heurs",
    cl::desc("Use loop idiom recognition code size heuristics when compiling"
             " with -Os/-Oz"),
    cl::init(true), cl::Hidden);

namespace {

class LoopIdiomRecognize {
  Loop *CurLoop;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
  ScalarEvolution *SE;
  TargetLibraryInfo *TLI;
  const TargetTransformInfo *TTI;
  const DataLayout *DL;
  bool ApplyCodeSizeHeuristics;

public:
  explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
                              LoopInfo *LI, ScalarEvolution *SE,
                              TargetLibraryInfo *TLI,
                              const TargetTransformInfo *TTI,
                              const DataLayout *DL)
      : CurLoop(nullptr), AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI),
        DL(DL) {}

  bool runOnLoop(Loop *L);

private:
  typedef SmallVector<StoreInst *, 8> StoreList;
  typedef MapVector<Value *, StoreList> StoreListMap;
  StoreListMap StoreRefsForMemset;
  StoreListMap StoreRefsForMemsetPattern;
  StoreList StoreRefsForMemcpy;

  bool HasMemset;
  bool HasMemsetPattern;
  bool HasMemcpy;

  /// \name Countable Loop Idiom Handling
  /// @{

  bool runOnCountableLoop();
  bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
                      SmallVectorImpl<BasicBlock *> &ExitBlocks);

  void collectStores(BasicBlock *BB);
  bool isLegalStore(StoreInst *SI, bool &ForMemset, bool &ForMemsetPattern,
                    bool &ForMemcpy);
  bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
                         bool ForMemset);
  bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);

  bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
                               unsigned StoreAlignment, Value *StoredVal,
                               Instruction *TheStore,
                               SmallPtrSetImpl<Instruction *> &Stores,
                               const SCEVAddRecExpr *Ev, const SCEV *BECount,
                               bool NegStride, bool IsLoopMemset = false);
  bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
  bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
                                 bool IsLoopMemset = false);

  /// @}
  /// \name Noncountable Loop Idiom Handling
  /// @{

  bool runOnNoncountableLoop();

  bool recognizePopcount();
  void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
                               PHINode *CntPhi, Value *Var);

  /// @}
};

class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
  static char ID;
  explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
    initializeLoopIdiomRecognizeLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent());
    const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();

    LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
    return LIR.runOnLoop(L);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG.
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
                                              LoopStandardAnalysisResults &AR,
                                              LPMUpdater &) {
  const auto *DL = &L.getHeader()->getModule()->getDataLayout();

  LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
  if (!LIR.runOnLoop(&L))
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}

char LoopIdiomRecognizeLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                      "Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
                    "Recognize loop idioms", false, false)

Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }

static void deleteDeadInstruction(Instruction *I) {
  I->replaceAllUsesWith(UndefValue::get(I->getType()));
  I->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//
//          Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//

bool LoopIdiomRecognize::runOnLoop(Loop *L) {
  CurLoop = L;
  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!L->getLoopPreheader())
    return false;

  // Disable loop idiom recognition if the function's name is a common idiom.
  // Otherwise we could turn the body of memset itself into a call to memset
  // and recurse infinitely.
  StringRef Name = L->getHeader()->getParent()->getName();
  if (Name == "memset" || Name == "memcpy")
    return false;

  // Determine if code size heuristics need to be applied.
  ApplyCodeSizeHeuristics =
      L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;

  HasMemset = TLI->has(LibFunc_memset);
  HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
  HasMemcpy = TLI->has(LibFunc_memcpy);

  if (HasMemset || HasMemsetPattern || HasMemcpy)
    if (SE->hasLoopInvariantBackedgeTakenCount(L))
      return runOnCountableLoop();

  return runOnNoncountableLoop();
}

bool LoopIdiomRecognize::runOnCountableLoop() {
  const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
  assert(!isa<SCEVCouldNotCompute>(BECount) &&
         "runOnCountableLoop() called on a loop without a predictable "
         "backedge-taken count");

  // If this loop executes exactly one time, then it should be peeled, not
  // optimized by this pass.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    if (BECst->getAPInt() == 0)
      return false;

  SmallVector<BasicBlock *, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  DEBUG(dbgs() << "loop-idiom Scanning: F["
               << CurLoop->getHeader()->getParent()->getName() << "] Loop %"
               << CurLoop->getHeader()->getName() << "\n");

  bool MadeChange = false;

  // The following transforms hoist stores/memsets into the loop pre-header.
  // Give up if the loop contains instructions that may throw.
  LoopSafetyInfo SafetyInfo;
  computeLoopSafetyInfo(&SafetyInfo, CurLoop);
  if (SafetyInfo.MayThrow)
    return MadeChange;

  // Scan all the blocks in the loop that are not in subloops.
  for (auto *BB : CurLoop->getBlocks()) {
    // Ignore blocks in subloops.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;

    MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
  }

  return MadeChange;
}

static unsigned getStoreSizeInBytes(StoreInst *SI, const DataLayout *DL) {
  uint64_t SizeInBits = DL->getTypeSizeInBits(SI->getValueOperand()->getType());
  assert(((SizeInBits & 7) || (SizeInBits >> 32) == 0) &&
         "Don't overflow unsigned.");
  return (unsigned)SizeInBits >> 3;
}

static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
  const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
  return ConstStride->getAPInt();
}

/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
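///
/// For example (illustrative, little-endian): a strided store of the 4-byte
/// constant 0x01020304 yields a 16-byte pattern that holds the constant
/// replicated four times; memset_pattern16 then splats those 16 bytes over
/// the region.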
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
  // If the value isn't a constant, we can't promote it to being in a constant
  // array. We could theoretically do a store to an alloca or something, but
  // that doesn't seem worthwhile.
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return nullptr;

  // Only handle simple values that are a power of two bytes in size.
  uint64_t Size = DL->getTypeSizeInBits(V->getType());
  if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
    return nullptr;

  // Don't care enough about darwin/ppc to implement this.
  if (DL->isBigEndian())
    return nullptr;

  // Convert to size in bytes.
  Size /= 8;

  // TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
  // if the top and bottom are the same (e.g. for vectors and large integers).
  if (Size > 16)
    return nullptr;

  // If the constant is exactly 16 bytes, just use it.
  if (Size == 16)
    return C;

  // Otherwise, we'll use an array of the constants.
  unsigned ArraySize = 16 / Size;
  ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
  return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}

bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
                                      bool &ForMemsetPattern, bool &ForMemcpy) {
  // Don't touch volatile stores.
  if (!SI->isSimple())
    return false;

  // Don't convert stores of non-integral pointer types to memsets (which
  // store integers).
  if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
    return false;

  // Avoid merging nontemporal stores.
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  Value *StoredVal = SI->getValueOperand();
  Value *StorePtr = SI->getPointerOperand();

  // Reject stores that are so large that they overflow an unsigned.
  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
  if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
    return false;

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *StoreEv =
      dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
    return false;

  // Check to see if we have a constant stride.
  if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
    return false;

  // See if the store can be turned into a memset.

  // If the stored value is a byte-wise value (like i32 -1), then it may be
  // turned into a memset of i8 -1, assuming that all the consecutive bytes
  // are stored. A store of i32 0x01020304 can never be turned into a memset,
  // but it can be turned into memset_pattern if the target supports it.
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = nullptr;

  // If we're allowed to form a memset, and the stored value would be
  // acceptable for memset, use it.
  if (HasMemset && SplatValue &&
      // Verify that the stored value is loop invariant. If not, we can't
      // promote the memset.
      CurLoop->isLoopInvariant(SplatValue)) {
    // It looks like we can use SplatValue.
    ForMemset = true;
    return true;
  } else if (HasMemsetPattern &&
             // Don't create memset_pattern16s with address spaces.
             StorePtr->getType()->getPointerAddressSpace() == 0 &&
             (PatternValue = getMemSetPatternValue(StoredVal, DL))) {
    // It looks like we can use PatternValue!
    ForMemsetPattern = true;
    return true;
  }

  // Otherwise, see if the store can be turned into a memcpy.
  if (HasMemcpy) {
    // Check to see if the stride matches the size of the store. If so, then
    // we know that every byte is touched in the loop.
    APInt Stride = getStoreStride(StoreEv);
    unsigned StoreSize = getStoreSizeInBytes(SI, DL);
    if (StoreSize != Stride && StoreSize != -Stride)
      return false;
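    // For example (illustrative): 4-byte stores whose address recurrence is
    // {A,+,4} touch every byte of the region, so (BECount+1)*4 bytes can be
    // copied in one go; a stride of 8 would leave 4-byte holes and is
    // rejected here.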
    // The store must be feeding a non-volatile load.
    LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
    if (!LI || !LI->isSimple())
      return false;

    // See if the pointer expression is an AddRec like {base,+,1} on the
    // current loop, which indicates a strided load. If we have something
    // else, it's a random load we can't handle.
    const SCEVAddRecExpr *LoadEv =
        dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
    if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
      return false;

    // The store and load must share the same stride.
    if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
      return false;

    // Success. This store can be converted into a memcpy.
    ForMemcpy = true;
    return true;
  }
  // This store can't be transformed into a memset/memcpy.
  return false;
}

void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
  StoreRefsForMemset.clear();
  StoreRefsForMemsetPattern.clear();
  StoreRefsForMemcpy.clear();
  for (Instruction &I : *BB) {
    StoreInst *SI = dyn_cast<StoreInst>(&I);
    if (!SI)
      continue;

    bool ForMemset = false;
    bool ForMemsetPattern = false;
    bool ForMemcpy = false;
    // Make sure this is a strided store with a constant stride.
    if (!isLegalStore(SI, ForMemset, ForMemsetPattern, ForMemcpy))
      continue;

    // Save the store locations.
    if (ForMemset) {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemset[Ptr].push_back(SI);
    } else if (ForMemsetPattern) {
      // Find the base pointer.
      Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
      StoreRefsForMemsetPattern[Ptr].push_back(SI);
    } else if (ForMemcpy)
      StoreRefsForMemcpy.push_back(SI);
  }
}

/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
    BasicBlock *BB, const SCEV *BECount,
    SmallVectorImpl<BasicBlock *> &ExitBlocks) {
  // We can only promote stores in this block if they are unconditionally
  // executed in the loop. For a block to be unconditionally executed, it has
  // to dominate all the exit blocks of the loop. Verify this now.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(BB, ExitBlocks[i]))
      return false;

  bool MadeChange = false;
  // Look for store instructions, which may be optimized to memset/memcpy.
  collectStores(BB);

  // Look for a single store or sets of stores with a common base, which can be
  // optimized into a memset (memset_pattern). The latter most commonly happens
  // with structs and hand-unrolled loops.
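  // For example (an illustrative sketch), a hand-unrolled loop such as
  //
  //   for (i = 0; i < n; i += 2) { A[i] = 0; A[i+1] = 0; }
  //
  // produces two stores per iteration that together cover a contiguous
  // region, so the pair can still be merged into a single memset.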
  for (auto &SL : StoreRefsForMemset)
    MadeChange |= processLoopStores(SL.second, BECount, true);

  for (auto &SL : StoreRefsForMemsetPattern)
    MadeChange |= processLoopStores(SL.second, BECount, false);

  // Optimize the store into a memcpy, if it feeds a similarly strided load.
  for (auto &SI : StoreRefsForMemcpy)
    MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);

  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;
    // Look for memset instructions, which may be optimized to a larger memset.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
      WeakVH InstPtr(&*I);
      if (!processLoopMemSet(MSI, BECount))
        continue;
      MadeChange = true;

      // If processing the memset invalidated our iterator, start over from the
      // top of the block.
      if (!InstPtr)
        I = BB->begin();
    }
  }

  return MadeChange;
}

/// processLoopStores - See if the given stores can be promoted to a memset.
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
                                           const SCEV *BECount,
                                           bool ForMemset) {
  // Try to find consecutive stores that can be transformed into memsets.
  SetVector<StoreInst *> Heads, Tails;
  SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;

  // Do a quadratic search on all of the given stores and find
  // all of the pairs of stores that follow each other.
  SmallVector<unsigned, 16> IndexQueue;
  for (unsigned i = 0, e = SL.size(); i < e; ++i) {
    assert(SL[i]->isSimple() && "Expected only non-volatile stores.");

    Value *FirstStoredVal = SL[i]->getValueOperand();
    Value *FirstStorePtr = SL[i]->getPointerOperand();
    const SCEVAddRecExpr *FirstStoreEv =
        cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
    APInt FirstStride = getStoreStride(FirstStoreEv);
    unsigned FirstStoreSize = getStoreSizeInBytes(SL[i], DL);

    // See if we can optimize just this store in isolation.
    if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
      Heads.insert(SL[i]);
      continue;
    }

    Value *FirstSplatValue = nullptr;
    Constant *FirstPatternValue = nullptr;

    if (ForMemset)
      FirstSplatValue = isBytewiseValue(FirstStoredVal);
    else
      FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);

    assert((FirstSplatValue || FirstPatternValue) &&
           "Expected either splat value or pattern value.");

    IndexQueue.clear();
    // If a store has multiple consecutive store candidates, search the Stores
    // array according to the sequence: from i+1 to e, then from i-1 to 0.
    // This is because pairing with the immediately succeeding or preceding
    // candidate usually creates the best chance to find a memset opportunity.
    unsigned j = 0;
    for (j = i + 1; j < e; ++j)
      IndexQueue.push_back(j);
    for (j = i; j > 0; --j)
      IndexQueue.push_back(j - 1);
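    // For example (a worked instance): with i = 3 and e = 6, IndexQueue is
    // {4, 5, 2, 1, 0}, so the immediate neighbors of SL[3] are tried first.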
    for (auto &k : IndexQueue) {
      assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
      Value *SecondStorePtr = SL[k]->getPointerOperand();
      const SCEVAddRecExpr *SecondStoreEv =
          cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
      APInt SecondStride = getStoreStride(SecondStoreEv);

      if (FirstStride != SecondStride)
        continue;

      Value *SecondStoredVal = SL[k]->getValueOperand();
      Value *SecondSplatValue = nullptr;
      Constant *SecondPatternValue = nullptr;

      if (ForMemset)
        SecondSplatValue = isBytewiseValue(SecondStoredVal);
      else
        SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);

      assert((SecondSplatValue || SecondPatternValue) &&
             "Expected either splat value or pattern value.");

      if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
        if (ForMemset) {
          if (FirstSplatValue != SecondSplatValue)
            continue;
        } else {
          if (FirstPatternValue != SecondPatternValue)
            continue;
        }
        Tails.insert(SL[k]);
        Heads.insert(SL[i]);
        ConsecutiveChain[SL[i]] = SL[k];
        break;
      }
    }
  }

  // We may run into multiple chains that merge into a single chain. We mark the
  // stores that we transformed so that we don't visit the same store twice.
  SmallPtrSet<Value *, 16> TransformedStores;
  bool Changed = false;

  // For stores that start but don't end a link in the chain:
  for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
       it != e; ++it) {
    if (Tails.count(*it))
      continue;

    // We found a store instr that starts a chain. Now follow the chain and try
    // to transform it.
    SmallPtrSet<Instruction *, 8> AdjacentStores;
    StoreInst *I = *it;

    StoreInst *HeadStore = I;
    unsigned StoreSize = 0;

    // Collect the chain into a list.
    while (Tails.count(I) || Heads.count(I)) {
      if (TransformedStores.count(I))
        break;
      AdjacentStores.insert(I);

      StoreSize += getStoreSizeInBytes(I, DL);
      // Move to the next value in the chain.
      I = ConsecutiveChain[I];
    }

    Value *StoredVal = HeadStore->getValueOperand();
    Value *StorePtr = HeadStore->getPointerOperand();
    const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
    APInt Stride = getStoreStride(StoreEv);

    // Check to see if the stride matches the size of the stores. If so, then
    // we know that every byte is touched in the loop.
    if (StoreSize != Stride && StoreSize != -Stride)
      continue;

    bool NegStride = StoreSize == -Stride;

    if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
                                StoredVal, HeadStore, AdjacentStores, StoreEv,
                                BECount, NegStride)) {
      TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
      Changed = true;
    }
  }

  return Changed;
}

/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
                                           const SCEV *BECount) {
  // We can only handle non-volatile memsets with a constant size.
  if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
    return false;

  // If we're not allowed to hack on memset, we fail.
  if (!HasMemset)
    return false;

  Value *Pointer = MSI->getDest();

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided store. If we have something else, it's a
  // random store we can't handle.
  const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
  if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
    return false;

  // Reject memsets that are so large that they overflow an unsigned.
  uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
  if ((SizeInBytes >> 32) != 0)
    return false;

  // Check to see if the stride matches the size of the memset. If so, then we
  // know that every byte is touched in the loop.
  const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
  if (!ConstStride)
    return false;

  APInt Stride = ConstStride->getAPInt();
  if (SizeInBytes != Stride && SizeInBytes != -Stride)
    return false;

  // Verify that the memset value is loop invariant. If not, we can't promote
  // the memset.
  Value *SplatValue = MSI->getValue();
  if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
    return false;

  SmallPtrSet<Instruction *, 1> MSIs;
  MSIs.insert(MSI);
  bool NegStride = SizeInBytes == -Stride;
  return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
                                 MSI->getAlignment(), SplatValue, MSI, MSIs, Ev,
                                 BECount, NegStride, /*IsLoopMemset=*/true);
}

/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
                      const SCEV *BECount, unsigned StoreSize,
                      AliasAnalysis &AA,
                      SmallPtrSetImpl<Instruction *> &IgnoredStores) {
  // Get the location that may be stored across the loop. Since the access is
  // strided positively through memory, we say that the modified location starts
  // at the pointer and has infinite size.
  uint64_t AccessSize = MemoryLocation::UnknownSize;

  // If the loop iterates a fixed number of times, we can refine the access size
  // to be exactly the size of the memset, which is (BECount+1)*StoreSize.
  if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
    AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
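  // For example (a worked instance): a loop with a backedge-taken count of 99
  // storing i32 values accesses (99 + 1) * 4 = 400 bytes.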
  // TODO: For this to be really effective, we have to dive into the pointer
  // operand in the store. A store to &A[i] of 100 will always return may-alias
  // with a store to &A[100]; we would need StoreLoc to be "A" with a size of
  // 100, which will then no-alias a store to &A[100].
  MemoryLocation StoreLoc(Ptr, AccessSize);

  for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
       ++BI)
    for (Instruction &I : **BI)
      if (IgnoredStores.count(&I) == 0 &&
          (AA.getModRefInfo(&I, StoreLoc) & Access))
        return true;

  return false;
}

// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset. Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
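//
// For example (an illustrative sketch): a loop storing 4-byte elements to
// A[n-1], A[n-2], ..., A[0] has Start == &A[n-1] and BECount == n-1, so the
// recomputed base is &A[n-1] - (n-1)*4 == &A[0].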
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
                                        Type *IntPtr, unsigned StoreSize,
                                        ScalarEvolution *SE) {
  const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
  if (StoreSize != 1)
    Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
                           SCEV::FlagNUW);
  return SE->getMinusSCEV(Start, Index);
}

/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::processLoopStridedStore(
    Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
    Value *StoredVal, Instruction *TheStore,
    SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
    const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
  Value *SplatValue = isBytewiseValue(StoredVal);
  Constant *PatternValue = nullptr;

  if (!SplatValue)
    PatternValue = getMemSetPatternValue(StoredVal, DL);

  assert((SplatValue || PatternValue) &&
         "Expected either splat value or pattern value.");

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate
  // the header. This allows us to insert code for them in the preheader.
  unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
  Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);

  const SCEV *Start = Ev->getStart();
  // Handle negative strided loops.
  if (NegStride)
    Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a splattable value. We can turn
  // this into a memset in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write to the aliased location. Check for any overlap by generating the
  // base pointer and checking the region.
  Value *BasePtr =
      Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
  if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
                            *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
    return false;
  }

  if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
    return false;

  // Okay, everything looks good, insert the memset.

  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

  const SCEV *NumBytesS =
      SE->getAddExpr(BECount, SE->getOne(IntPtr), SCEV::FlagNUW);
  if (StoreSize != 1) {
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
                               SCEV::FlagNUW);
  }

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

  CallInst *NewCall;
  if (SplatValue) {
    NewCall =
        Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
  } else {
    // Everything is emitted in default address space
    Type *Int8PtrTy = DestInt8PtrTy;

    Module *M = TheStore->getModule();
    Value *MSP =
        M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
                               Int8PtrTy, Int8PtrTy, IntPtr);
    inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);

    // Otherwise we should form a memset_pattern16. PatternValue is known to be
    // a constant array of 16 bytes. Plop the value into a mergable global.
    GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
                                            GlobalValue::PrivateLinkage,
                                            PatternValue, ".memset_pattern");
    GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
    GV->setAlignment(16);
    Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
    NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
  }

  DEBUG(dbgs() << "  Formed memset: " << *NewCall << "\n"
               << "    from store to: " << *Ev << " at: " << *TheStore << "\n");
  NewCall->setDebugLoc(TheStore->getDebugLoc());

  // Okay, the memset has been formed. Zap the original store and anything that
  // feeds into it.
  for (auto *I : Stores)
    deleteDeadInstruction(I);
  ++NumMemSet;
  return true;
}

/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
///   for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
                                                    const SCEV *BECount) {
  assert(SI->isSimple() && "Expected only non-volatile stores.");

  Value *StorePtr = SI->getPointerOperand();
  const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
  APInt Stride = getStoreStride(StoreEv);
  unsigned StoreSize = getStoreSizeInBytes(SI, DL);
  bool NegStride = StoreSize == -Stride;

  // The store must be feeding a non-volatile load.
  LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
  assert(LI->isSimple() && "Expected only non-volatile loads.");

  // See if the pointer expression is an AddRec like {base,+,1} on the current
  // loop, which indicates a strided load. If we have something else, it's a
  // random load we can't handle.
  const SCEVAddRecExpr *LoadEv =
      cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));

  // The trip count of the loop and the base pointer of the addrec SCEV are
  // guaranteed to be loop invariant, which means that they should dominate
  // the header. This allows us to insert code for them in the preheader.
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  SCEVExpander Expander(*SE, *DL, "loop-idiom");

  const SCEV *StrStart = StoreEv->getStart();
  unsigned StrAS = SI->getPointerAddressSpace();
  Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);

  // Handle negative strided loops.
  if (NegStride)
    StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);

  // Okay, we have a strided store "p[i]" of a loaded value. We can turn
  // this into a memcpy in the loop preheader now if we want. However, this
  // would be unsafe to do if there is anything else in the loop that may read
  // or write the memory region we're storing to. This includes the load that
  // feeds the stores. Check for an alias by generating the base address and
  // checking everything.
  Value *StoreBasePtr = Expander.expandCodeFor(
      StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());

  SmallPtrSet<Instruction *, 1> Stores;
  Stores.insert(SI);
  if (mayLoopAccessLocation(StoreBasePtr, MRI_ModRef, CurLoop, BECount,
                            StoreSize, *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  const SCEV *LdStart = LoadEv->getStart();
  unsigned LdAS = LI->getPointerAddressSpace();

  // Handle negative strided loops.
  if (NegStride)
    LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);

  // For a memcpy, we have to make sure that the input array is not being
  // mutated by the loop.
  Value *LoadBasePtr = Expander.expandCodeFor(
      LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());

  if (mayLoopAccessLocation(LoadBasePtr, MRI_Mod, CurLoop, BECount, StoreSize,
                            *AA, Stores)) {
    Expander.clear();
    // If we generated new code for the base pointer, clean up.
    RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
    RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
    return false;
  }

  if (avoidLIRForMultiBlockLoop())
    return false;

  // Okay, everything is safe, we can transform this!

  // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
  // pointer size if it isn't already.
  BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);

  const SCEV *NumBytesS =
      SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);
  if (StoreSize != 1)
    NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
                               SCEV::FlagNUW);

  Value *NumBytes =
      Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());

  CallInst *NewCall =
      Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
                           std::min(SI->getAlignment(), LI->getAlignment()));
  NewCall->setDebugLoc(SI->getDebugLoc());

  DEBUG(dbgs() << "  Formed memcpy: " << *NewCall << "\n"
               << "    from load ptr=" << *LoadEv << " at: " << *LI << "\n"
               << "    from store ptr=" << *StoreEv << " at: " << *SI << "\n");

  // Okay, the memcpy has been formed. Zap the original store and anything that
  // feeds into it.
  deleteDeadInstruction(SI);
  ++NumMemCpy;
  return true;
}

// When compiling for code size we avoid idiom recognition for a multi-block
// loop unless it is a loop_memset idiom or a memset/memcpy idiom in a nested
// loop.
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
                                                   bool IsLoopMemset) {
  if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
    if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
      DEBUG(dbgs() << "  " << CurLoop->getHeader()->getParent()->getName()
                   << " : LIR " << (IsMemset ? "Memset" : "Memcpy")
                   << " avoided: multi-block top-level loop\n");
      return true;
    }
  }

  return false;
}

bool LoopIdiomRecognize::runOnNoncountableLoop() {
  return recognizePopcount();
}

/// Check if the given conditional branch is based on the comparison between
/// a variable and zero, and if the variable is non-zero, the control yields to
/// the loop entry. If the branch matches the behavior, the variable involved
/// in the comparison is returned. This function will be called to see if the
/// precondition and postcondition of the loop are in desirable form.
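///
/// For example (illustrative IR), with LoopEntry == %loop.body, the branch
///   %cmp = icmp ne i32 %x, 0
///   br i1 %cmp, label %loop.body, label %exit
/// matches and %x is returned.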
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
  if (!BI || !BI->isConditional())
    return nullptr;

  ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
  if (!Cond)
    return nullptr;

  ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
  if (!CmpZero || !CmpZero->isZero())
    return nullptr;

  ICmpInst::Predicate Pred = Cond->getPredicate();
  if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
      (Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
    return Cond->getOperand(0);

  return nullptr;
}

/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the population bit.
/// 2) \p CntPhi is set to the corresponding phi node.
/// 3) \p Var is set to the value whose population bits are being counted.
///
/// The core idiom we are trying to detect is:
/// \code
///    if (x0 == 0)
///      goto loop-exit // the precondition of the loop
///    cnt0 = init-val;
///    do {
///       x1 = phi (x0, x2);
///       cnt1 = phi(cnt0, cnt2);
///
///       cnt2 = cnt1 + 1;
///        ...
///       x2 = x1 & (x1 - 1);
///        ...
///    } while(x != 0);
///
/// loop-exit:
/// \endcode
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
                                Instruction *&CntInst, PHINode *&CntPhi,
                                Value *&Var) {
  // step 1: Check to see if the look-back branch matches this pattern:
  //    "if (a != 0) goto loop-entry".
  BasicBlock *LoopEntry;
  Instruction *DefX2, *CountInst;
  Value *VarX1, *VarX0;
  PHINode *PhiX, *CountPhi;

  DefX2 = CountInst = nullptr;
  VarX1 = VarX0 = nullptr;
  PhiX = CountPhi = nullptr;
  LoopEntry = *(CurLoop->block_begin());

  // step 1: Check if the loop-back branch is in desirable form.
  {
    if (Value *T = matchCondition(
            dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
      DefX2 = dyn_cast<Instruction>(T);
    else
      return false;
  }

  // step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
  {
    if (!DefX2 || DefX2->getOpcode() != Instruction::And)
      return false;

    BinaryOperator *SubOneOp;

    if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
      VarX1 = DefX2->getOperand(1);
    else {
      VarX1 = DefX2->getOperand(0);
      SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
    }
    if (!SubOneOp)
      return false;

    Instruction *SubInst = cast<Instruction>(SubOneOp);
    ConstantInt *Dec = dyn_cast<ConstantInt>(SubInst->getOperand(1));
    if (!Dec ||
        !((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
          (SubInst->getOpcode() == Instruction::Add &&
           Dec->isAllOnesValue()))) {
      return false;
    }
  }

  // step 3: Check the recurrence of variable X
  {
    PhiX = dyn_cast<PHINode>(VarX1);
    if (!PhiX ||
        (PhiX->getOperand(0) != DefX2 && PhiX->getOperand(1) != DefX2)) {
      return false;
    }
  }

  // step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
  {
    CountInst = nullptr;
    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                              IterE = LoopEntry->end();
         Iter != IterE; Iter++) {
      Instruction *Inst = &*Iter;
      if (Inst->getOpcode() != Instruction::Add)
        continue;

      ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
      if (!Inc || !Inc->isOne())
        continue;

      PHINode *Phi = dyn_cast<PHINode>(Inst->getOperand(0));
      if (!Phi || Phi->getParent() != LoopEntry)
        continue;

      // Check if the result of the instruction is live out of the loop.
      bool LiveOutLoop = false;
      for (User *U : Inst->users()) {
        if ((cast<Instruction>(U))->getParent() != LoopEntry) {
          LiveOutLoop = true;
          break;
        }
      }

      if (LiveOutLoop) {
        CountInst = Inst;
        CountPhi = Phi;
        break;
      }
    }

    if (!CountInst)
      return false;
  }

  // step 5: check if the precondition is in this form:
  //   "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
  {
    auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
    Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
    if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
      return false;

    CntInst = CountInst;
    CntPhi = CountPhi;
    Var = T;
  }

  return true;
}

/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
  if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
    return false;

  // Counting population is usually done with a few arithmetic instructions.
  // Such instructions can be easily "absorbed" by vacant slots in a
  // non-compact loop. Therefore, recognizing a popcount idiom only makes
  // sense in a compact loop.
  //
  // Give up if the loop has multiple blocks or multiple backedges.
  if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
    return false;

  BasicBlock *LoopBody = *(CurLoop->block_begin());
  if (LoopBody->size() >= 20) {
    // The loop is too big, bail out.
    return false;
  }

  // It should have a preheader containing nothing but an unconditional branch.
  BasicBlock *PH = CurLoop->getLoopPreheader();
  if (!PH || &PH->front() != PH->getTerminator())
    return false;
  auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
  if (!EntryBI || EntryBI->isConditional())
    return false;

  // It should have a precondition block where the generated popcount intrinsic
  // function can be inserted.
  auto *PreCondBB = PH->getSinglePredecessor();
  if (!PreCondBB)
    return false;
  auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  if (!PreCondBI || PreCondBI->isUnconditional())
    return false;

  Instruction *CntInst;
  PHINode *CntPhi;
  Value *Val;
  if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
    return false;

  transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
  return true;
}

static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
                                       const DebugLoc &DL) {
  Value *Ops[] = {Val};
  Type *Tys[] = {Val->getType()};

  Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
  Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
  CallInst *CI = IRBuilder.CreateCall(Func, Ops);
  CI->setDebugLoc(DL);

  return CI;
}

void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
                                                 Instruction *CntInst,
                                                 PHINode *CntPhi, Value *Var) {
  BasicBlock *PreHead = CurLoop->getLoopPreheader();
  auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
  const DebugLoc DL = CntInst->getDebugLoc();

  // Assuming that, before the transformation, the loop looks like:
  //   if (x) // the precondition
  //     do { cnt++; x &= x - 1; } while (x);

  // Step 1: Insert the ctpop instruction at the end of the precondition block
  IRBuilder<> Builder(PreCondBr);
  Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
  {
    PopCnt = createPopcntIntrinsic(Builder, Var, DL);
    NewCount = PopCntZext =
        Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));

    if (NewCount != PopCnt)
      (cast<Instruction>(NewCount))->setDebugLoc(DL);

    // TripCnt is exactly the number of iterations the loop has
    TripCnt = NewCount;

    // If the population counter's initial value is not zero, insert Add Inst.
    Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
    ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
    if (!InitConst || !InitConst->isZero()) {
      NewCount = Builder.CreateAdd(NewCount, CntInitVal);
      (cast<Instruction>(NewCount))->setDebugLoc(DL);
    }
  }

  // Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
  //   "if (NewCount == 0) loop-exit". Without this change, the intrinsic
  //   function would be partially dead code, and downstream passes would drag
  //   it back from the precondition block to the preheader.
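  // For example (illustrative): a precondition branching on
  // "icmp ne i32 %x, 0" is rewritten to branch on an equivalent compare of
  // the zero-extended ctpop result against 0, so the intrinsic's result is
  // consumed by the precondition itself.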
  {
    ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());

    Value *Opnd0 = PopCntZext;
    Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
    if (PreCond->getOperand(0) != Var)
      std::swap(Opnd0, Opnd1);

    ICmpInst *NewPreCond = cast<ICmpInst>(
        Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
    PreCondBr->setCondition(NewPreCond);

    RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
  }

  // Step 3: Note that the population count is exactly the trip count of the
  // loop in question, which enables us to convert the loop from a noncountable
  // loop into a countable one. The benefit is twofold:
  //
  //  - If the loop only counts population, the entire loop becomes dead after
  //    the transformation. It is a lot easier to prove a countable loop dead
  //    than to prove a noncountable one. (In some C dialects, an infinite loop
  //    isn't dead even if it computes nothing useful. In general, DCE needs
  //    to prove a noncountable loop finite before safely deleting it.)
  //
  //  - If the loop also performs something else, it remains alive.
  //    Since it is transformed to countable form, it can be aggressively
  //    optimized by some optimizations which are in general not applicable
  //    to a noncountable loop.
  //
  // After this step, this loop (conceptually) would look like the following:
  //   newcnt = __builtin_ctpop(x);
  //   t = newcnt;
  //   if (x)
  //     do { cnt++; x &= x - 1; t--; } while (t > 0);
  BasicBlock *Body = *(CurLoop->block_begin());
  {
    auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
    ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
    Type *Ty = TripCnt->getType();

    PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

    Builder.SetInsertPoint(LbCond);
    Instruction *TcDec = cast<Instruction>(
        Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
                          "tcdec", false, true));

    TcPhi->addIncoming(TripCnt, PreHead);
    TcPhi->addIncoming(TcDec, Body);

    CmpInst::Predicate Pred =
        (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
    LbCond->setPredicate(Pred);
    LbCond->setOperand(0, TcDec);
    LbCond->setOperand(1, ConstantInt::get(Ty, 0));
  }

  // Step 4: All the references to the original population counter outside
  //  the loop are replaced with the NewCount -- the value returned from
  //  __builtin_ctpop().
  CntInst->replaceUsesOutsideBlock(NewCount, Body);

  // Step 5: Forget the "non-computable" trip-count SCEV associated with the
  //   loop. The loop would otherwise not be deleted even if it becomes empty.
  SE->forgetLoop(CurLoop);
}