//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
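///
/// For example (an illustrative sketch, not drawn from a real test case), a
/// locally balanced pair such as
///
///   %0 = tail call i8* @objc_retain(i8* %x)
///   ... code that provably cannot release %x ...
///   call void @objc_release(i8* %x)
///
/// can be deleted outright once the pass proves the two calls pair up.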
//===----------------------------------------------------------------------===//
#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARC.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::objcarc;
#define DEBUG_TYPE "objc-arc-opts"
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  // ConstantData (like ConstantPointerNull and UndefValue) is used across
  // modules. It's never a single-use value.
  if (isa<ConstantData>(Arg))
    return nullptr;

  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object, but it has multiple uses that are all
  // trivial, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{
// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
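//
// For example (illustrative IR, not from an actual test case), a pool pair
// with nothing autoreleased in between:
//
//   %pool = call i8* @objc_autoreleasePoolPush()
//   ... no autoreleases ...
//   call void @objc_autoreleasePoolPop(i8* %pool)
//
// would be a candidate for deletion.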
// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops,       "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases,"Number of autoreleases converted to releases");
STATISTIC(NumRets,        "Number of return value forwarding "
                          "retain+autoreleases eliminated");
STATISTIC(NumRRs,         "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,       "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
#endif
namespace {

/// \brief Per-BasicBlock state.
class BBState {
  /// The number of unique control paths from the entry which can reach this
  /// block.
  unsigned TopDownPathCount = 0;

  /// The number of unique control paths to exits from this block.
  unsigned BottomUpPathCount = 0;

  /// The top-down traversal uses this to record information known about a
  /// pointer at the bottom of each block.
  BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

  /// The bottom-up traversal uses this to record information known about a
  /// pointer at the top of each block.
  BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

  /// Effective predecessors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Preds;

  /// Effective successors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Succs;

public:
  static const unsigned OverflowOccurredValue;

  BBState() = default;
  using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
  using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;

  top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
  top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
  const_top_down_ptr_iterator top_down_ptr_begin() const {
    return PerPtrTopDown.begin();
  }
  const_top_down_ptr_iterator top_down_ptr_end() const {
    return PerPtrTopDown.end();
  }
  bool hasTopDownPtrs() const {
    return !PerPtrTopDown.empty();
  }

  using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
  using const_bottom_up_ptr_iterator =
      decltype(PerPtrBottomUp)::const_iterator;

  bottom_up_ptr_iterator bottom_up_ptr_begin() {
    return PerPtrBottomUp.begin();
  }
  bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
  const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
    return PerPtrBottomUp.begin();
  }
  const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
    return PerPtrBottomUp.end();
  }
  bool hasBottomUpPtrs() const {
    return !PerPtrBottomUp.empty();
  }
  /// Mark this block as being an entry block, which has one path from the
  /// entry by definition.
  void SetAsEntry() { TopDownPathCount = 1; }

  /// Mark this block as being an exit block, which has one path to an exit by
  /// definition.
  void SetAsExit() { BottomUpPathCount = 1; }

  /// Attempt to find the PtrState object describing the top down state for
  /// pointer Arg. Return a new initialized PtrState describing the top down
  /// state for Arg if we do not find one.
  TopDownPtrState &getPtrTopDownState(const Value *Arg) {
    return PerPtrTopDown[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg. Return a new initialized PtrState describing the bottom up
  /// state for Arg if we do not find one.
  BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg.
  bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp.find(Arg);
  }

  void clearBottomUpPointers() {
    PerPtrBottomUp.clear();
  }

  void clearTopDownPointers() {
    PerPtrTopDown.clear();
  }

  void InitFromPred(const BBState &Other);
  void InitFromSucc(const BBState &Other);
  void MergePred(const BBState &Other);
  void MergeSucc(const BBState &Other);
  /// Compute the number of possible unique paths from an entry to an exit
  /// which pass through this block. This is only valid after both the
  /// top-down and bottom-up traversals are complete.
  ///
  /// Returns true if overflow occurred. Returns false if overflow did not
  /// occur.
  bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
    if (TopDownPathCount == OverflowOccurredValue ||
        BottomUpPathCount == OverflowOccurredValue)
      return true;
    unsigned long long Product =
      (unsigned long long)TopDownPathCount*BottomUpPathCount;
    // Overflow occurred if any of the upper bits of Product are set or if all
    // the lower bits of Product are set.
    return (Product >> 32) ||
           ((PathCount = Product) == OverflowOccurredValue);
  }
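  // As a worked example of the overflow check above: with
  // TopDownPathCount == 0x10000 and BottomUpPathCount == 0x10000, Product is
  // 1ULL << 32, so the upper 32 bits are nonzero and overflow is reported,
  // even though the truncated 32-bit PathCount would be 0.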
  // Specialized CFG utilities.
  using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;

  edge_iterator pred_begin() const { return Preds.begin(); }
  edge_iterator pred_end() const { return Preds.end(); }
  edge_iterator succ_begin() const { return Succs.begin(); }
  edge_iterator succ_end() const { return Succs.end(); }

  void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
  void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

  bool isExit() const { return Succs.empty(); }
};

} // end anonymous namespace
const unsigned BBState::OverflowOccurredValue = 0xffffffff;

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;

} // end namespace llvm
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top down pointers when, by this
  // addition, TopDownPathCount becomes OverflowOccurredValue even though
  // "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom up pointers when, by this
  // addition, BottomUpPathCount becomes OverflowOccurredValue even though
  // "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}
raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << "    TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    DEBUG(dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
         << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
         << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  OS << "    BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    DEBUG(dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
         << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
         << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}
namespace {

/// \brief The main ARC optimization pass.
class ObjCARCOpt : public FunctionPass {
  bool Changed;
  ProvenanceAnalysis PA;

  /// A cache of references to runtime entry point constants.
  ARCRuntimeEntryPoints EP;

  /// A cache of MDKinds that can be passed into other functions to propagate
  /// MDKind identifiers.
  ARCMDKindCache MDKindCache;

  /// A flag indicating whether this optimization pass should run.
  bool Run;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 ARCInstKind &Class);
  void OptimizeIndividualCalls(Function &F);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     BlotMapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(Instruction *Inst,
                               DenseMap<Value *, RRInfo> &Releases,
                               BBState &MyStates);
  bool VisitTopDown(BasicBlock *BB,
                    DenseMap<const BasicBlock *, BBState> &BBStates,
                    DenseMap<Value *, RRInfo> &Releases);
  bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
             BlotMapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 BlotMapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

  bool
  PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases, Module *M,
                           Instruction *Retain,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                           Value *Arg, bool KnownSafe,
                           bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            BlotMapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases, Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  void releaseMemory() override;

public:
  static char ID;

  ObjCARCOpt() : FunctionPass(ID) {
    initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
  }
};

} // end anonymous namespace
char ObjCARCOpt::ID = 0;

INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAAWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
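///
/// For example (illustrative IR, not from an actual test case), in:
///
///   %1 = tail call i8* @objc_autoreleaseReturnValue(i8* %0)
///   %2 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %0)
///
/// both calls operate on the same pointer and can be erased as a pair.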
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV->getIterator(),
                       Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do
      --I;
    while (I != Begin && IsNoopInstruction(&*I));
    if (GetBasicARCInstKind(&*I) == ARCInstKind::AutoreleaseRV &&
        GetArgRCIdentityRoot(&*I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(&*I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
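///
/// For example (illustrative IR, not from an actual test case), in:
///
///   %0 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
///   ret i8* %0
///
/// the call must be kept as-is; when no user is a return (or a retainRV),
/// the call is rewritten to plain objc_autorelease instead.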
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);

  // If the argument is ConstantPointerNull or UndefValue, its other users
  // aren't actually interesting to look at.
  if (isa<ConstantData>(Ptr))
    return;

  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
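///
/// For example (illustrative IR, not from an actual test case), an ARC call
/// on a null pointer such as:
///
///   call void @objc_release(i8* null)
///
/// is a no-op and is simply erased here.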
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");

  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case ARCInstKind::NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;

    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case ARCInstKind::StoreWeak:
    case ARCInstKind::LoadWeak:
    case ARCInstKind::LoadWeakRetained:
    case ARCInstKind::InitWeak:
    case ARCInstKind::DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                       "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::CopyWeak:
    case ARCInstKind::MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case ARCInstKind::AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }

    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
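    //
    // For instance (illustrative IR, not from an actual test case), when %x
    // is an identified object with no other uses:
    //
    //   %0 = call i8* @objc_autorelease(i8* %x)   ; result unused
    //
    // becomes:
    //
    //   call void @objc_release(i8* %x), !clang.imprecise_release !0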
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();

        Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
        CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
                                             Call);
        NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                             MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
              "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
              << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = ARCInstKind::Release;
      }
    }

    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << unsigned(Class);
      continue;
    }

    const Value *Arg = GetArgRCIdentityRoot(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
            << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << unsigned(Class);

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, and the call
    // is not a release that doesn't have the clang.imprecise_release tag, the
    // call could be pushed up to just those paths with non-null incoming
    // values. For now, don't bother splitting critical edges for this.
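    //
    // For instance (illustrative IR, not from an actual test case), given:
    //
    //   %p = phi i8* [ null, %bb1 ], [ %obj, %bb2 ]
    //   call void @objc_release(i8* %p), !clang.imprecise_release !0
    //
    // the release is a no-op along the %bb1 path and could be cloned into
    // %bb2 alone, which is what the worklist loop below does.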
    if (Class == ARCInstKind::Release &&
        !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
      continue;

    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          GetRCIdentityRoot(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case ARCInstKind::Retain:
        case ARCInstKind::RetainBlock:
          // These can always be moved up.
          break;
        case ARCInstKind::Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::ClaimRV:
        case ARCInstKind::RetainRV:
        case ARCInstKind::AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              GetRCIdentityRoot(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                    << *CInst << "\n"
                       "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that
    // this code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
        BBStates.find(*SI);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If the pointer is in an S_None state bottom-up, clear the sequence
      // progress, since the sequence in the bottom-up state finished,
      // suggesting a mismatch between retains and releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease:
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state, were it not for the fact that we
      // are known safe, stop code motion. This is because whether or not it
      // is safe to remove RR pairs via KnownSafe is an orthogonal concept to
      // whether we are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}
bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    if (S.MatchWithRetain()) {
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV) {
        DEBUG(dbgs() << "        Matching with: " << *Inst << "\n");
        Retains[Inst] = S.GetRRInfo();
      }
      S.ClearSequenceProgress();
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;

    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}
bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  DEBUG(dbgs() << "Before:\n" << BBStates[BB] << "\n"
               << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = &*std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    DEBUG(dbgs() << "    Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}
bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break
    // since a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      DEBUG(dbgs() << "        Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return false;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These cannot be uses of an objc pointer.
    return false;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}
bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
                         DenseMap<const BasicBlock *, BBState> &BBStates,
                         DenseMap<Value *, RRInfo> &Releases) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  DEBUG(dbgs() << "Before:\n" << BBStates[BB] << "\n"
               << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (Instruction &Inst : *BB) {
    DEBUG(dbgs() << "    Visiting " << Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates);
  }

  DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
               << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}
static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
    succ_iterator SE(TI, false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB).second) {
        TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
        SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there are also blocks which we treat
  // as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (BasicBlock &ExitBB : F) {
    BBState &MyStates = BBStates[&ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
    Visited.insert(&ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB).second) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}
// Visit the function both top-down and bottom-up.
bool ObjCARCOpt::Visit(Function &F,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains,
                       DenseMap<Value *, RRInfo> &Releases) {
  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the ReversePostOrderTraversal
  // class here because we want the reverse-CFG postorder to consider each
  // function exit point, and we want to ignore selected cycle edges.
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
                    BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder))
    BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(PostOrder))
    TopDownNestingDetected |= VisitTopDown(BB, BBStates, Releases);

  return TopDownNestingDetected && BottomUpNestingDetected;
}
/// Move the calls in RetainsToMove and ReleasesToMove.
void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");

  // Insert the new retain and release calls.
  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }
  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }

  // Delete the original retain and release calls.
  for (Instruction *OrigRetain : RetainsToMove.Calls) {
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
  }
  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
  }
}
bool ObjCARCOpt::PairUpRetainsAndReleases(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M,
    Instruction *Retain,
    SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
    RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
    bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements unless
  // we are dealing with a retainable object with multiple provenance sources.
  bool KnownSafeTD = true, KnownSafeBU = true;
  bool CFGHazardAfflicted = false;

  // Connect the dots between the top-down-collected RetainsToMove and
  // bottom-up-collected ReleasesToMove to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
    SmallVector<Instruction *, 4> NewReleases;
    for (Instruction *NewRetain : NewRetains) {
      auto It = Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
        auto Jt = Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;

        // If the release does not have a reference to the retain as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewRetainReleaseRRI.Calls.count(NewRetain))
          return false;

        if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta -= PathCount;

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
              NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
              NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = nullptr;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
              if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];
                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta -= PathCount;
              }
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (Instruction *NewRelease : NewReleases) {
      auto It = Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
      for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
        auto Jt = Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;

        // If the retain does not have a reference to the release as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewReleaseRetainRRI.Calls.count(NewRelease))
          return false;

        if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta += PathCount;
          OldCount += PathCount;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
              if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];

                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    if (NewRetains.empty()) break;
  }

  // We can only remove pointers if we are known safe in both directions.
  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
  if (UnconditionallySafe) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewDelta = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
    if (NewDelta != 0)
      return false;
  }

  // At this point, we are not going to remove any RR pairs, but we still are
  // able to move RR pairs. If one of our pointers is afflicted with
  // CFGHazards, we cannot perform such code motion so exit early.
  const bool WillPerformCodeMotion =
      !RetainsToMove.ReverseInsertPts.empty() ||
      !ReleasesToMove.ReverseInsertPts.empty();
  if (CFGHazardAfflicted && WillPerformCodeMotion)
    return false;

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // them.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}
1707 /// Identify pairings between the retains and releases, and delete and/or move
1709 bool ObjCARCOpt::PerformCodePlacement(
1710 DenseMap<const BasicBlock *, BBState> &BBStates,
1711 BlotMapVector<Value *, RRInfo> &Retains,
1712 DenseMap<Value *, RRInfo> &Releases, Module *M) {
1713 DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
1715 bool AnyPairsCompletelyEliminated = false;
1716 SmallVector<Instruction *, 8> DeadInsts;
1718 // Visit each retain.
1719 for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
1722 Value *V = I->first;
1723 if (!V) continue; // blotted
1725 Instruction *Retain = cast<Instruction>(V);
1727 DEBUG(dbgs() << "Visiting: " << *Retain << "\n");

    Value *Arg = GetArgRCIdentityRoot(Retain);

    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);

    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
              dyn_cast<GlobalVariable>(
                  GetRCIdentityRoot(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;
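
    // Illustrative example (the global name is hypothetical, not from the
    // source): a reference-count identity root such as
    //   %obj = load i8*, i8** @SomeConstantGlobal
    // where @SomeConstantGlobal is a constant GlobalVariable may be
    // reference-counted but will never be deallocated, so retain/release
    // pairs on it are safe to delete regardless of intervening decrements.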

    // Connect the dots between the top-down-collected RetainsToMove and
    // bottom-up-collected ReleasesToMove to form sets of related calls.
    RRInfo RetainsToMove, ReleasesToMove;

    bool PerformMoveCalls = PairUpRetainsAndReleases(
        BBStates, Retains, Releases, M, Retain, DeadInsts,
        RetainsToMove, ReleasesToMove, Arg, KnownSafe,
        AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}

/// Weak pointer optimizations.
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");

  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
  // itself because it uses AliasAnalysis and we need to do provenance
  // queries instead.
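  //
  // Illustrative sketch (IR not taken from the source): RLE forwards an
  // earlier weak load of the same pointer,
  //   %1 = call i8* @objc_loadWeak(i8** %p)
  //   %2 = call i8* @objc_loadWeak(i8** %p)   ; can reuse %1
  // and S2L forwards a previously stored value,
  //   call i8* @objc_storeWeak(i8** %p, i8* %v)
  //   %3 = call i8* @objc_loadWeak(i8** %p)   ; can use %v directly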
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::LoadWeak &&
        Class != ARCInstKind::LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      continue;
    }

    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = std::prev(I);
    BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*std::prev(J);
      ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
      switch (EarlierClass) {
      case ARCInstKind::LoadWeak:
      case ARCInstKind::LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that one.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case MayAlias:
        case PartialAlias:
          goto clobbered;
        case NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::StoreWeak:
      case ARCInstKind::InitWeak: {
        // If this is storing to the same pointer and has the same size etc.
        // replace this load's value with the stored value.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case MayAlias:
        case PartialAlias:
          goto clobbered;
        case NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::MoveWeak:
      case ARCInstKind::CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case ARCInstKind::AutoreleasepoolPush:
      case ARCInstKind::None:
      case ARCInstKind::IntrinsicUser:
      case ARCInstKind::User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }

  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
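  //
  // Illustrative sketch (IR not taken from the source): a weak variable that
  // is only ever initialized, stored to, and destroyed,
  //   %w = alloca i8*
  //   call i8* @objc_initWeak(i8** %w, i8* %obj)
  //   call void @objc_destroyWeak(i8** %w)
  // has no observable effect, so the alloca and all of its users can go.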
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(Inst);
    Value *Arg = Call->getArgOperand(0);
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (User *U : Alloca->users()) {
        const Instruction *UserInst = cast<Instruction>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
        case ARCInstKind::DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      Changed = true;
      for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
        CallInst *UserInst = cast<CallInst>(*UI++);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case ARCInstKind::DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }
}

/// Identify program paths which execute sequences of retains and releases which
/// can be eliminated.
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  // Releases, Retains - These are used to store the results of the main flow
  // analysis. These use Value* as the key instead of Instruction* so that the
  // map stays valid when we get around to rewriting code and calls get
  // replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  BlotMapVector<Value *, RRInfo> Retains;

  // This is used during the traversal of the function to track the
  // states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  // Transform.
  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                           Releases,
                                                           F.getParent());

  return AnyPairsCompletelyEliminated && NestingDetected;
}

/// Check if there is a dependent call earlier that does not have anything in
/// between the Retain and the call that can affect the reference count of their
/// shared pointer argument. Note that Retain need not be in BB.
static bool
HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
                             SmallPtrSetImpl<Instruction *> &DepInsts,
                             SmallPtrSetImpl<const BasicBlock *> &Visited,
                             ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
                   DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return false;

  auto *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that the pointer is the return value of the call.
  if (!Call || Arg != Call)
    return false;

  // Check that the call is a regular call.
  ARCInstKind Class = GetBasicARCInstKind(Call);
  return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call;
}
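
// Illustrative sketch (the callee name is hypothetical, not from the source):
// the shape this recognizes is a retain applied directly to a call's return
// value,
//   %obj = call i8* @producer(...)
//   %1   = call i8* @objc_retain(i8* %obj)
// with nothing in between that can change the reference count of %obj.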

/// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count of
/// Obj.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
                                  Instruction *Autorelease,
                                  SmallPtrSetImpl<Instruction *> &DepInsts,
                                  SmallPtrSetImpl<const BasicBlock *> &Visited,
                                  ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg,
                   BB, Autorelease, DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that we found a retain with the same argument.
  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
      GetArgRCIdentityRoot(Retain) != Arg) {
    return nullptr;
  }

  return Retain;
}

/// Look for an ``autorelease'' instruction dependent on Arg such that there are
/// no instructions dependent on Arg that need a positive ref count in between
/// the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       SmallPtrSetImpl<Instruction *> &DepInsts,
                                       SmallPtrSetImpl<const BasicBlock *> &V,
                                       ProvenanceAnalysis &PA) {
  FindDependencies(NeedsPositiveRetainCount, Arg,
                   BB, Ret, DepInsts, V, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Autorelease = dyn_cast_or_null<CallInst>(*DepInsts.begin());
  if (!Autorelease)
    return nullptr;
  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
  if (!IsAutorelease(AutoreleaseClass))
    return nullptr;
  if (GetArgRCIdentityRoot(Autorelease) != Arg)
    return nullptr;

  return Autorelease;
}

/// Look for this pattern:
/// \code
///    %call = call i8* @something(...)
///    %2 = call i8* @objc_retain(i8* %call)
///    %3 = call i8* @objc_autorelease(i8* %2)
///    ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");

  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (BasicBlock &BB: F) {
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
    if (!Ret)
      continue;

    DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

    const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));

    // Look for an ``autorelease'' instruction that is a predecessor of Ret and
    // dependent on Arg such that there are no instructions dependent on Arg
    // that need a positive ref count in between the autorelease and Ret.
    CallInst *Autorelease = FindPredecessorAutoreleaseWithSafePath(
        Arg, &BB, Ret, DependingInstructions, Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Autorelease)
      continue;

    CallInst *Retain = FindPredecessorRetainWithSafePath(
        Arg, Autorelease->getParent(), Autorelease, DependingInstructions,
        Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Retain)
      continue;

    // Check that there is nothing that can affect the reference count
    // between the retain and the call. Note that Retain need not be in BB.
    bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
                                                          DependingInstructions,
                                                          Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!HasSafePathToCall)
      continue;

    // If so, we can zap the retain and autorelease.
    Changed = true;
    ++NumRets;
    DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
                 << *Autorelease << "\n");
    EraseInstruction(Retain);
    EraseInstruction(Autorelease);
  }
}

#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
  Statistic &NumRetains =
      AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
  Statistic &NumReleases =
      AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
#endif

bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;

  // Intuitively, objc_retain and others are nocapture, however in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.
  MDKindCache.init(&M);

  // Initialize our runtime entry point cache.
  EP.init(&M);

  return false;
}

bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;

  DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
        "\n");

  PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());

#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, false);
  }
#endif

  // This pass performs several distinct transformations. As a compile-time aid
  // when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);

  // Optimizations for weak pointers.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
                            (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
                            (1 << unsigned(ARCInstKind::StoreWeak)) |
                            (1 << unsigned(ARCInstKind::InitWeak)) |
                            (1 << unsigned(ARCInstKind::CopyWeak)) |
                            (1 << unsigned(ARCInstKind::MoveWeak)) |
                            (1 << unsigned(ARCInstKind::DestroyWeak))))
    OptimizeWeakCalls(F);
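
  // Illustrative note (not from the original comments): UsedInThisFunction is
  // a bitmask with one bit per ARCInstKind, populated by
  // OptimizeIndividualCalls. For example, a function whose only ARC call is
  // objc_storeWeak sets just (1 << unsigned(ARCInstKind::StoreWeak)), which is
  // enough to enter OptimizeWeakCalls above while letting the retain+release
  // and autorelease phases below be skipped.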

  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
                            (1 << unsigned(ARCInstKind::RetainRV)) |
                            (1 << unsigned(ARCInstKind::RetainBlock))))
    if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}
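
  // Illustrative note (not from the original comments): OptimizeSequences
  // reports true only when it both eliminated at least one retain+release
  // pair and detected nesting, so this loop reruns the flow analysis until a
  // fixed point where no further nested pairs can be exposed.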

  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                            (1 << unsigned(ARCInstKind::AutoreleaseRV))))
    OptimizeReturns(F);

  // Gather statistics after optimization.
#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, true);
  }
#endif

  DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {