//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the second call into bar(t + 2), where t = a + b is already computed by the
// first call.
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes (a + 2) + b is the best form according
// to its rank system.
//
// To address this limitation, NaryReassociate reassociates an expression in a
// form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
// (a + b) is computed before.
//
// NaryReassociate works as follows. For every instruction in the form of (a +
// b) + c, it checks whether a + c or b + c is already computed by a dominating
// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
// c) + a and removes the redundancy accordingly. To efficiently look up whether
// an expression is computed before, we store each instruction seen and its SCEV
// into an SCEV-to-instruction map.
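//
// Illustratively (a sketch, not the exact interfaces), the lookup amounts to:
//
//   on visiting I = (a + b) + c:
//     if SeenExprs contains getSCEV(a + c):  rewrite I to (a + c) + b
//     else if SeenExprs contains getSCEV(b + c):  rewrite I to (b + c) + a
//     SeenExprs[getSCEV(I)].push_back(I)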
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking through the function
// in depth-first order. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to run for multiple
// iterations before emitting optimal code. One source of this need is that we
// only split an operand when it is used only once. The above algorithm can
// eliminate an instruction and decrease the usage count of its operands. As a
// result, an instruction that previously had multiple uses may become a
// single-use instruction and thus eligible for split consideration. For
// example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac+b because ab is used
// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
// result, ab2 becomes dead and ab will be used only once in the second
// iteration, at which point abc can be reassociated to ac+b.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds and muls for now. This should be extended
//    and generalized.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "nary-reassociate"

namespace {

class NaryReassociateLegacyPass : public FunctionPass {
public:
  static char ID;

  NaryReassociateLegacyPass() : FunctionPass(ID) {
    initializeNaryReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  NaryReassociatePass Impl;
};

} // anonymous namespace

char NaryReassociateLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(NaryReassociateLegacyPass, "nary-reassociate",
                      "Nary reassociation", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociateLegacyPass, "nary-reassociate",
                    "Nary reassociation", false, false)

FunctionPass *llvm::createNaryReassociatePass() {
  return new NaryReassociateLegacyPass();
}

bool NaryReassociateLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  return Impl.runImpl(F, AC, DT, SE, TLI, TTI);
}

PreservedAnalyses NaryReassociatePass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  bool Changed = runImpl(F, AC, DT, SE, TLI, TTI);

  // FIXME: We need to invalidate this to avoid PR28400. Is there a better
  // solution?
  AM.invalidate<ScalarEvolutionAnalysis>(F);

  if (!Changed)
    return PreservedAnalyses::all();

  // FIXME: This should also 'preserve the CFG'.
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

bool NaryReassociatePass::runImpl(Function &F, AssumptionCache *AC_,
                                  DominatorTree *DT_, ScalarEvolution *SE_,
                                  TargetLibraryInfo *TLI_,
                                  TargetTransformInfo *TTI_) {
  AC = AC_;
  DT = DT_;
  SE = SE_;
  TLI = TLI_;
  TTI = TTI_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = doOneIteration(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

// Whitelist the instruction types NaryReassociate handles for now.
static bool isPotentiallyNaryReassociable(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::GetElementPtr:
  case Instruction::Mul:
    return true;
  default:
    return false;
  }
}

bool NaryReassociatePass::doOneIteration(Function &F) {
  bool Changed = false;
  SeenExprs.clear();
  // Process the basic blocks in a depth-first traversal of the dominator
  // tree. This order ensures that all bases of a candidate are in Candidates
  // when we process it.
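  // (Illustrative: in a pre-order walk, if BB1 dominates BB2, every
  // reassociable instruction of BB1 has been recorded in SeenExprs before any
  // instruction of BB2 is visited, so dominating candidates are always
  // available for lookup.)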
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ++I) {
      if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(&*I)) {
        const SCEV *OldSCEV = SE->getSCEV(&*I);
        if (Instruction *NewI = tryReassociate(&*I)) {
          Changed = true;
          SE->forgetValue(&*I);
          I->replaceAllUsesWith(NewI);
          // If SeenExprs contains I's WeakVH, that entry will be replaced with
          // nullptr.
          RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
          I = NewI->getIterator();
        }
        // Add the rewritten instruction to SeenExprs; the original instruction
        // is deleted.
        const SCEV *NewSCEV = SE->getSCEV(&*I);
        SeenExprs[NewSCEV].push_back(WeakVH(&*I));
        // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
        // is equivalent to I. However, ScalarEvolution::getSCEV may
        // weaken nsw causing NewSCEV not to equal OldSCEV. For example, suppose
        // we reassociated
        //   I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4
        // to
        //   NewI = &a[sext(i)] + sext(j).
        //
        // ScalarEvolution computes
        //   getSCEV(I)    = a + 4 * sext(i + j)
        //   getSCEV(NewI) = a + 4 * sext(i) + 4 * sext(j)
        // which are different SCEVs.
        //
        // To alleviate this issue of ScalarEvolution not always capturing
        // equivalence, we add I to SeenExprs[OldSCEV] as well so that we can
        // map both SCEVs, before and after tryReassociate(I), to I.
        //
        // This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll.
        if (NewSCEV != OldSCEV)
          SeenExprs[OldSCEV].push_back(WeakVH(&*I));
      }
    }
  }
  return Changed;
}

Instruction *NaryReassociatePass::tryReassociate(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
    return tryReassociateBinaryOp(cast<BinaryOperator>(I));
  case Instruction::GetElementPtr:
    return tryReassociateGEP(cast<GetElementPtrInst>(I));
  default:
    llvm_unreachable("should be filtered out by isPotentiallyNaryReassociable");
  }
}
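
// A GEP is "foldable" when the target can fold it into the addressing modes of
// the memory instructions that use it. Illustrative, target-dependent example:
// on x86-64, "getelementptr inbounds i32, i32* %a, i64 %i" typically lowers to
// an operand like [rdi + rsi*4], so TTI reports its cost as TCC_Free and
// reassociating it would only add instructions.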
static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value *, 4> Indices;
  for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
    Indices.push_back(*I);
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}

Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
  // Not worth reassociating GEP if it is foldable.
  if (isGEPFoldable(GEP, TTI))
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }
  }
  return nullptr;
}

bool NaryReassociatePass::requiresSignExtension(Value *Index,
                                                GetElementPtrInst *GEP) {
  unsigned PointerSizeInBits =
      DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace());
  return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType) {
  Value *IndexToSplit = GEP->getOperand(I + 1);
  if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
    IndexToSplit = SExt->getOperand(0);
  } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
    // zext can be treated as sext if the source is non-negative.
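    // For instance (illustrative): when %i is known non-negative,
    //   zext i32 %i to i64  ==  sext i32 %i to i64,
    // so looking through the zext here is as safe as looking through a sext.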
    if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
      IndexToSplit = ZExt->getOperand(0);
  }

  if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
    // If the I-th index needs sext and the underlying add is not equipped with
    // nsw, we cannot split the add because
    // sext(LHS + RHS) != sext(LHS) + sext(RHS).
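    // Illustrative counterexample with i8 operands: sext(127 + 1) sign-extends
    // the wrapped sum -128, whereas sext(127) + sext(1) = 128.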
    if (requiresSignExtension(IndexToSplit, GEP) &&
        computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
            OverflowResult::NeverOverflows)
      return nullptr;

    Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
    // IndexToSplit = LHS + RHS.
    if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
      return NewGEP;
    // Symmetrically, try IndexToSplit = RHS + LHS.
    if (LHS != RHS) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
        return NewGEP;
    }
  }
  return nullptr;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(SE->getSCEV(*Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
      DL->getTypeSizeInBits(LHS->getType()) <
          DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
    // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
    // zext if the source operand is proved non-negative. We should do it
    // consistently so that CandidateExpr is more likely to have been computed
    // before. See @reassociate_gep_assume for an example of this
    // canonicalization.
    IndexExprs[I] =
        SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
  }
  const SCEV *CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP),
                                             IndexExprs);

  Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  IRBuilder<> Builder(GEP);
  // Candidate does not necessarily have the same pointer type as GEP. Use
  // bitcast or pointer cast to make sure they have the same type, so that the
  // later RAUW doesn't complain.
  Candidate = Builder.CreateBitOrPointerCast(Candidate, GEP->getType());
  assert(Candidate->getType() == GEP->getType());

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = GEP->getResultElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // Another complication: because I is not necessarily the last index of the
  // GEP, the size of the type at the I-th index (IndexedSize) is not
  // necessarily divisible by ElementSize. For example,
  //
  //   #pragma pack(1)
  //   struct S {
  //     int a;
  //     int64 b[12];
  //   };
  //   #pragma pack()
  //
  // sizeof(S) = 100 is indivisible by sizeof(int64) = 8.
  //
  // We bail out on this case for now. TODO: emit an uglygep instead.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))]
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP =
      cast<GetElementPtrInst>(Builder.CreateGEP(Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(BinaryOperator *I) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  if (auto *NewI = tryReassociateBinaryOp(LHS, RHS, I))
    return NewI;
  if (auto *NewI = tryReassociateBinaryOp(RHS, LHS, I))
    return NewI;
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                                         BinaryOperator *I) {
  Value *A = nullptr, *B = nullptr;
  // To be conservative, we reassociate I only when it is the only user of
  // (A op B).
  if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
    // I = (A op B) op RHS
    //   = (A op RHS) op B or (B op RHS) op A
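    // For example (illustrative): if I = (a + b) + c and some dominating t
    // already computes a + c, the first branch below finds t via SCEV(a + c)
    // and rewrites I to t + b.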
    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
    const SCEV *RHSExpr = SE->getSCEV(RHS);
    if (BExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
        return NewI;
    }
    if (AExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, BExpr, RHSExpr), A, I))
        return NewI;
    }
  }
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
                                                          Value *RHS,
                                                          BinaryOperator *I) {
  // Look for the closest dominator LHS of I that computes LHSExpr, and replace
  // I with LHS op RHS.
  auto *LHS = findClosestMatchingDominator(LHSExpr, I);
  if (LHS == nullptr)
    return nullptr;

  Instruction *NewI = nullptr;
  switch (I->getOpcode()) {
  case Instruction::Add:
    NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
    break;
  case Instruction::Mul:
    NewI = BinaryOperator::CreateMul(LHS, RHS, "", I);
    break;
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  NewI->takeName(I);
  return NewI;
}

bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
                                         Value *&Op1, Value *&Op2) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return match(V, m_Add(m_Value(Op1), m_Value(Op2)));
  case Instruction::Mul:
    return match(V, m_Mul(m_Value(Op1), m_Value(Op2)));
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return false;
}

const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return nullptr;
}

Instruction *
NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
                                                  Instruction *Dominatee) {
  auto Pos = SeenExprs.find(CandidateExpr);
  if (Pos == SeenExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
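  // (Illustrative: once the pre-order walk leaves the dominator-tree subtree
  // rooted at a candidate's block, it never re-enters it, so a candidate that
  // fails one dominance check can never dominate a later Dominatee.)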
  while (!Candidates.empty()) {
    // Candidates stores WeakVHs, so a candidate can be nullptr if it's removed
    // during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
      if (DT->dominates(CandidateInstruction, Dominatee))
        return CandidateInstruction;
    }
    Candidates.pop_back();
  }
  return nullptr;
}