1 //===- SampleProfile.cpp - Incorporate sample profiles into the IR --------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the SampleProfileLoader transformation. This pass
10 // reads a profile file generated by a sampling profiler (e.g. Linux Perf -
11 // http://perf.wiki.kernel.org/) and generates IR metadata to reflect the
12 // profile information in the given profile.
14 // This pass generates branch weight annotations on the IR:
16 // - prof: Represents branch weights. This annotation is added to branches
17 // to indicate the weights of each edge coming out of the branch.
18 // The weight of each edge is the weight of the target block for
19 // that edge. The weight of a block B is computed as the maximum
20 // number of samples found in B.
22 //===----------------------------------------------------------------------===//
24 #include "llvm/Transforms/IPO/SampleProfile.h"
25 #include "llvm/ADT/ArrayRef.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/DenseSet.h"
28 #include "llvm/ADT/MapVector.h"
29 #include "llvm/ADT/PriorityQueue.h"
30 #include "llvm/ADT/SCCIterator.h"
31 #include "llvm/ADT/SmallVector.h"
32 #include "llvm/ADT/Statistic.h"
33 #include "llvm/ADT/StringMap.h"
34 #include "llvm/ADT/StringRef.h"
35 #include "llvm/ADT/Twine.h"
36 #include "llvm/Analysis/AssumptionCache.h"
37 #include "llvm/Analysis/BlockFrequencyInfoImpl.h"
38 #include "llvm/Analysis/InlineAdvisor.h"
39 #include "llvm/Analysis/InlineCost.h"
40 #include "llvm/Analysis/LazyCallGraph.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/ProfileSummaryInfo.h"
43 #include "llvm/Analysis/ReplayInlineAdvisor.h"
44 #include "llvm/Analysis/TargetLibraryInfo.h"
45 #include "llvm/Analysis/TargetTransformInfo.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/DebugLoc.h"
48 #include "llvm/IR/DiagnosticInfo.h"
49 #include "llvm/IR/Function.h"
50 #include "llvm/IR/GlobalValue.h"
51 #include "llvm/IR/InstrTypes.h"
52 #include "llvm/IR/Instruction.h"
53 #include "llvm/IR/Instructions.h"
54 #include "llvm/IR/IntrinsicInst.h"
55 #include "llvm/IR/LLVMContext.h"
56 #include "llvm/IR/MDBuilder.h"
57 #include "llvm/IR/Module.h"
58 #include "llvm/IR/PassManager.h"
59 #include "llvm/IR/PseudoProbe.h"
60 #include "llvm/IR/ValueSymbolTable.h"
61 #include "llvm/ProfileData/InstrProf.h"
62 #include "llvm/ProfileData/SampleProf.h"
63 #include "llvm/ProfileData/SampleProfReader.h"
64 #include "llvm/Support/Casting.h"
65 #include "llvm/Support/CommandLine.h"
66 #include "llvm/Support/Debug.h"
67 #include "llvm/Support/ErrorOr.h"
68 #include "llvm/Support/VirtualFileSystem.h"
69 #include "llvm/Support/raw_ostream.h"
70 #include "llvm/Transforms/IPO.h"
71 #include "llvm/Transforms/IPO/ProfiledCallGraph.h"
72 #include "llvm/Transforms/IPO/SampleContextTracker.h"
73 #include "llvm/Transforms/IPO/SampleProfileProbe.h"
74 #include "llvm/Transforms/Instrumentation.h"
75 #include "llvm/Transforms/Utils/CallPromotionUtils.h"
76 #include "llvm/Transforms/Utils/Cloning.h"
77 #include "llvm/Transforms/Utils/MisExpect.h"
78 #include "llvm/Transforms/Utils/SampleProfileLoaderBaseImpl.h"
79 #include "llvm/Transforms/Utils/SampleProfileLoaderBaseUtil.h"
89 #include <system_error>
using namespace sampleprof;
using namespace llvm::sampleprofutil;
using ProfileCount = Function::ProfileCount;
// Debug-type strings for LLVM_DEBUG/STATISTIC output; CSINLINE_DEBUG scopes
// the context-sensitive inlining messages under "sample-profile-inline".
#define DEBUG_TYPE "sample-profile"
#define CSINLINE_DEBUG DEBUG_TYPE "-inline"
// Pass statistics, reported under -stats.
STATISTIC(NumCSInlined,
          "Number of functions inlined with context sensitive profile");
STATISTIC(NumCSNotInlined,
          "Number of functions not inlined with context sensitive profile");
STATISTIC(NumMismatchedProfile,
          "Number of functions with CFG mismatched profile");
STATISTIC(NumMatchedProfile, "Number of functions with CFG matched profile");
STATISTIC(NumDuplicatedInlinesite,
          "Number of inlined callsites with a partial distribution factor");
110 STATISTIC(NumCSInlinedHitMinLimit,
111 "Number of functions with FDO inline stopped due to min size limit");
112 STATISTIC(NumCSInlinedHitMaxLimit,
113 "Number of functions with FDO inline stopped due to max size limit");
115 NumCSInlinedHitGrowthLimit,
116 "Number of functions with FDO inline stopped due to growth size limit");
// Command line option to specify the file to read samples from. This is
// mainly used for debugging.
static cl::opt<std::string> SampleProfileFile(
    "sample-profile-file", cl::init(""), cl::value_desc("filename"),
    cl::desc("Profile file loaded by -sample-profile"), cl::Hidden);

// The named file contains a set of transformations that may have been applied
// to the symbol names between the program from which the sample data was
// collected and the current program's symbols.
static cl::opt<std::string> SampleProfileRemappingFile(
    "sample-profile-remapping-file", cl::init(""), cl::value_desc("filename"),
    cl::desc("Profile remapping file loaded by -sample-profile"), cl::Hidden);

// When on, stale profile locations are fuzzy-matched to the current IR
// instead of being dropped.
static cl::opt<bool> SalvageStaleProfile(
    "salvage-stale-profile", cl::Hidden, cl::init(false),
    cl::desc("Salvage stale profile by fuzzy matching and use the remapped "
             "location for sample profile query."));

static cl::opt<bool> ReportProfileStaleness(
    "report-profile-staleness", cl::Hidden, cl::init(false),
    cl::desc("Compute and report stale profile statistical metrics."));

static cl::opt<bool> PersistProfileStaleness(
    "persist-profile-staleness", cl::Hidden, cl::init(false),
    cl::desc("Compute stale profile statistical metrics and write it into the "
             "native object file(.llvm_stats section)."));
145 static cl::opt<bool> FlattenProfileForMatching(
146 "flatten-profile-for-matching", cl::Hidden, cl::init(true),
148 "Use flattened profile for stale profile detection and matching."));
// With an "accurate" profile, missing samples are treated as provably cold
// rather than unknown.
static cl::opt<bool> ProfileSampleAccurate(
    "profile-sample-accurate", cl::Hidden, cl::init(false),
    cl::desc("If the sample profile is accurate, we will mark all un-sampled "
             "callsite and function as having 0 samples. Otherwise, treat "
             "un-sampled callsites and functions conservatively as unknown. "));

// Finer-grained variant of the above: applies to branches and calls only.
static cl::opt<bool> ProfileSampleBlockAccurate(
    "profile-sample-block-accurate", cl::Hidden, cl::init(false),
    cl::desc("If the sample profile is accurate, we will mark all un-sampled "
             "branches and calls as having 0 samples. Otherwise, treat "
             "them conservatively as unknown. "));
162 static cl::opt<bool> ProfileAccurateForSymsInList(
163 "profile-accurate-for-symsinlist", cl::Hidden, cl::init(true),
164 cl::desc("For symbols in profile symbol list, regard their profiles to "
165 "be accurate. It may be overriden by profile-sample-accurate. "));
// When the sample loader declines to inline a call site, merge the inlinee's
// profile back into its out-of-line copy (see
// promoteMergeNotInlinedContextSamples).
static cl::opt<bool> ProfileMergeInlinee(
    "sample-profile-merge-inlinee", cl::Hidden, cl::init(true),
    cl::desc("Merge past inlinee's profile to outline version if sample "
             "profile loader decided not to inline a call site. It will "
             "only be enabled when top-down order of profile loading is "
// NOTE(review): the tail of the above cl::desc string and its closing
// parentheses appear truncated in this chunk -- confirm against the full
// file.
static cl::opt<bool> ProfileTopDownLoad(
    "sample-profile-top-down-load", cl::Hidden, cl::init(true),
    cl::desc("Do profile annotation and inlining for functions in top-down "
             "order of call graph during sample profile loading. It only "
             "works for new pass manager. "));
181 UseProfiledCallGraph("use-profiled-call-graph", cl::init(true), cl::Hidden,
182 cl::desc("Process functions in a top-down order "
183 "defined by the profiled call graph when "
184 "-sample-profile-top-down-load is on."));
static cl::opt<bool> ProfileSizeInline(
    "sample-profile-inline-size", cl::Hidden, cl::init(false),
    cl::desc("Inline cold call sites in profile loader if it's beneficial "
// NOTE(review): the tail of the above cl::desc string and its closing
// parentheses appear truncated in this chunk -- confirm against the full
// file.

// Since profiles are consumed by many passes, turning on this option has
// side effects. For instance, pre-link SCC inliner would see merged profiles
// and inline the hot functions (that are skipped in this pass).
194 static cl::opt<bool> DisableSampleLoaderInlining(
195 "disable-sample-loader-inlining", cl::Hidden, cl::init(false),
196 cl::desc("If true, artifically skip inline transformation in sample-loader "
197 "pass, and merge (or scale) profiles (as configured by "
198 "--sample-profile-merge-inlinee)."));
202 SortProfiledSCC("sort-profiled-scc-member", cl::init(true), cl::Hidden,
203 cl::desc("Sort profiled recursion by edge weights."));
205 cl::opt<int> ProfileInlineGrowthLimit(
206 "sample-profile-inline-growth-limit", cl::Hidden, cl::init(12),
207 cl::desc("The size growth ratio limit for proirity-based sample profile "
208 "loader inlining."));
210 cl::opt<int> ProfileInlineLimitMin(
211 "sample-profile-inline-limit-min", cl::Hidden, cl::init(100),
212 cl::desc("The lower bound of size growth limit for "
213 "proirity-based sample profile loader inlining."));
215 cl::opt<int> ProfileInlineLimitMax(
216 "sample-profile-inline-limit-max", cl::Hidden, cl::init(10000),
217 cl::desc("The upper bound of size growth limit for "
218 "proirity-based sample profile loader inlining."));
cl::opt<int> SampleHotCallSiteThreshold(
    "sample-profile-hot-inline-threshold", cl::Hidden, cl::init(3000),
    cl::desc("Hot callsite threshold for proirity-based sample profile loader "
// NOTE(review): the tail of the above cl::desc string and its closing
// parentheses appear truncated in this chunk -- confirm against the full
// file (the visible part also carries the recurring "proirity" typo).

cl::opt<int> SampleColdCallSiteThreshold(
    "sample-profile-cold-inline-threshold", cl::Hidden, cl::init(45),
    cl::desc("Threshold for inlining cold callsites"));
230 static cl::opt<unsigned> ProfileICPRelativeHotness(
231 "sample-profile-icp-relative-hotness", cl::Hidden, cl::init(25),
233 "Relative hotness percentage threshold for indirect "
234 "call promotion in proirity-based sample profile loader inlining."));
236 static cl::opt<unsigned> ProfileICPRelativeHotnessSkip(
237 "sample-profile-icp-relative-hotness-skip", cl::Hidden, cl::init(1),
239 "Skip relative hotness check for ICP up to given number of targets."));
// NOTE(review): each of the following three options appears to be missing
// one argument line in this chunk (presumably its cl::init) -- confirm
// against the full file.
static cl::opt<bool> CallsitePrioritizedInline(
    "sample-profile-prioritized-inline", cl::Hidden,
    cl::desc("Use call site prioritized inlining for sample profile loader."
             "Currently only CSSPGO is supported."));

static cl::opt<bool> UsePreInlinerDecision(
    "sample-profile-use-preinliner", cl::Hidden,
    cl::desc("Use the preinliner decisions stored in profile context."));

static cl::opt<bool> AllowRecursiveInline(
    "sample-profile-recursive-inline", cl::Hidden,
    cl::desc("Allow sample loader inliner to inline recursive calls."));
257 static cl::opt<std::string> ProfileInlineReplayFile(
258 "sample-profile-inline-replay", cl::init(""), cl::value_desc("filename"),
260 "Optimization remarks file containing inline remarks to be replayed "
261 "by inlining from sample profile loader."),
264 static cl::opt<ReplayInlinerSettings::Scope> ProfileInlineReplayScope(
265 "sample-profile-inline-replay-scope",
266 cl::init(ReplayInlinerSettings::Scope::Function),
267 cl::values(clEnumValN(ReplayInlinerSettings::Scope::Function, "Function",
268 "Replay on functions that have remarks associated "
269 "with them (default)"),
270 clEnumValN(ReplayInlinerSettings::Scope::Module, "Module",
271 "Replay on the entire module")),
272 cl::desc("Whether inline replay should be applied to the entire "
273 "Module or just the Functions (default) that are present as "
274 "callers in remarks during sample profile inlining."),
277 static cl::opt<ReplayInlinerSettings::Fallback> ProfileInlineReplayFallback(
278 "sample-profile-inline-replay-fallback",
279 cl::init(ReplayInlinerSettings::Fallback::Original),
282 ReplayInlinerSettings::Fallback::Original, "Original",
283 "All decisions not in replay send to original advisor (default)"),
284 clEnumValN(ReplayInlinerSettings::Fallback::AlwaysInline,
285 "AlwaysInline", "All decisions not in replay are inlined"),
286 clEnumValN(ReplayInlinerSettings::Fallback::NeverInline, "NeverInline",
287 "All decisions not in replay are not inlined")),
288 cl::desc("How sample profile inline replay treats sites that don't come "
289 "from the replay. Original: defers to original advisor, "
290 "AlwaysInline: inline all sites not in replay, NeverInline: "
291 "inline no sites not in replay"),
294 static cl::opt<CallSiteFormat::Format> ProfileInlineReplayFormat(
295 "sample-profile-inline-replay-format",
296 cl::init(CallSiteFormat::Format::LineColumnDiscriminator),
298 clEnumValN(CallSiteFormat::Format::Line, "Line", "<Line Number>"),
299 clEnumValN(CallSiteFormat::Format::LineColumn, "LineColumn",
300 "<Line Number>:<Column Number>"),
301 clEnumValN(CallSiteFormat::Format::LineDiscriminator,
302 "LineDiscriminator", "<Line Number>.<Discriminator>"),
303 clEnumValN(CallSiteFormat::Format::LineColumnDiscriminator,
304 "LineColumnDiscriminator",
305 "<Line Number>:<Column Number>.<Discriminator> (default)")),
306 cl::desc("How sample profile inline replay file is formatted"), cl::Hidden);
// Hard cap on ICP promotions per indirect call site; also consulted by
// doesHistoryAllowICP/updateIDTMetaData below.
static cl::opt<unsigned>
    MaxNumPromotions("sample-profile-icp-max-prom", cl::init(3), cl::Hidden,
                     cl::desc("Max number of promotions for a single indirect "
                              "call callsite in sample profile loader"));

static cl::opt<bool> OverwriteExistingWeights(
    "overwrite-existing-weights", cl::Hidden, cl::init(false),
    cl::desc("Ignore existing branch weights on IR and always overwrite."));

static cl::opt<bool> AnnotateSampleProfileInlinePhase(
    "annotate-sample-profile-inline-phase", cl::Hidden, cl::init(false),
    cl::desc("Annotate LTO phase (prelink / postlink), or main (no LTO) for "
             "sample-profile inline pass name."));

// Flag defined in another translation unit; presumably the ext-tsp block
// placement toggle -- confirm its definition site when editing.
extern cl::opt<bool> EnableExtTspBlockPlacement;
328 using BlockWeightMap = DenseMap<const BasicBlock *, uint64_t>;
329 using EquivalenceClassMap = DenseMap<const BasicBlock *, const BasicBlock *>;
330 using Edge = std::pair<const BasicBlock *, const BasicBlock *>;
331 using EdgeWeightMap = DenseMap<Edge, uint64_t>;
333 DenseMap<const BasicBlock *, SmallVector<const BasicBlock *, 8>>;
// Scoped helper that, for MD5-based profiles, publishes a GUID -> function
// name mapping to every FunctionSamples (including nested inlinee samples)
// for its lifetime; the destructor clears the map and resets the pointers.
class GUIDToFuncNameMapper {
  // NOTE(review): access specifiers, early `return`s and several closing
  // braces of this class appear truncated in this chunk; code kept exactly
  // as seen.
  GUIDToFuncNameMapper(Module &M, SampleProfileReader &Reader,
                       DenseMap<uint64_t, StringRef> &GUIDToFuncNameMap)
      : CurrentReader(Reader), CurrentModule(M),
        CurrentGUIDToFuncNameMap(GUIDToFuncNameMap) {
    // Only needed when the reader stores function names as MD5 GUIDs.
    if (!CurrentReader.useMD5())
    for (const auto &F : CurrentModule) {
      StringRef OrigName = F.getName();
      CurrentGUIDToFuncNameMap.insert(
          {Function::getGUID(OrigName), OrigName});
      // Local to global var promotion used by optimization like thinlto
      // will rename the var and add suffix like ".llvm.xxx" to the
      // original local name. In sample profile, the suffixes of function
      // names are all stripped. Since it is possible that the mapper is
      // built in post-thin-link phase and var promotion has been done,
      // we need to add the substring of function name without the suffix
      // into the GUIDToFuncNameMap.
      StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
      if (CanonName != OrigName)
        CurrentGUIDToFuncNameMap.insert(
            {Function::getGUID(CanonName), CanonName});
    // Update GUIDToFuncNameMap for each function including inlinees.
    SetGUIDToFuncNameMapForAll(&CurrentGUIDToFuncNameMap);
  ~GUIDToFuncNameMapper() {
    if (!CurrentReader.useMD5())
    CurrentGUIDToFuncNameMap.clear();
    // Reset GUIDToFuncNameMap for of each function as they're no
    // longer valid at this point.
    SetGUIDToFuncNameMapForAll(nullptr);
  // Walks every FunctionSamples breadth-first via a worklist (top-level
  // profiles plus all callsite inlinee samples) and points each one's
  // GUIDToFuncNameMap at Map.
  void SetGUIDToFuncNameMapForAll(DenseMap<uint64_t, StringRef> *Map) {
    std::queue<FunctionSamples *> FSToUpdate;
    for (auto &IFS : CurrentReader.getProfiles()) {
      FSToUpdate.push(&IFS.second);
    while (!FSToUpdate.empty()) {
      FunctionSamples *FS = FSToUpdate.front();
      FS->GUIDToFuncNameMap = Map;
      for (const auto &ICS : FS->getCallsiteSamples()) {
        const FunctionSamplesMap &FSMap = ICS.second;
        for (const auto &IFS : FSMap) {
          FunctionSamples &FS = const_cast<FunctionSamples &>(IFS.second);
          FSToUpdate.push(&FS);
  SampleProfileReader &CurrentReader;
  Module &CurrentModule;
  DenseMap<uint64_t, StringRef> &CurrentGUIDToFuncNameMap;
// Inline candidate used by iterative callsite prioritized inliner
struct InlineCandidate {
  // NOTE(review): leading members and closing braces of these definitions
  // appear truncated in this chunk; code kept exactly as seen.
  const FunctionSamples *CalleeSamples;
  // Prorated callsite count, which will be used to guide inlining. For example,
  // if a callsite is duplicated in LTO prelink, then in LTO postlink the two
  // copies will get their own distribution factors and their prorated counts
  // will be used to decide if they should be inlined independently.
  uint64_t CallsiteCount;
  // Call site distribution factor to prorate the profile samples for a
  // duplicated callsite. Default value is 1.0.
  float CallsiteDistribution;

// Inline candidate comparer using call site weight
struct CandidateComparer {
  // Strict-weak order: higher callsite count wins; ties prefer the callee
  // with fewer body samples, then the smaller GUID for determinism.
  bool operator()(const InlineCandidate &LHS, const InlineCandidate &RHS) {
    if (LHS.CallsiteCount != RHS.CallsiteCount)
      return LHS.CallsiteCount < RHS.CallsiteCount;

    const FunctionSamples *LCS = LHS.CalleeSamples;
    const FunctionSamples *RCS = RHS.CalleeSamples;
    assert(LCS && RCS && "Expect non-null FunctionSamples");

    // Tie breaker using number of samples try to favor smaller functions first
    if (LCS->getBodySamples().size() != RCS->getBodySamples().size())
      return LCS->getBodySamples().size() > RCS->getBodySamples().size();

    // Tie breaker using GUID so we have stable/deterministic inlining order
    return LCS->getGUID(LCS->getName()) < RCS->getGUID(RCS->getName());

// Priority queue of inline candidates, ordered by CandidateComparer.
using CandidateQueue =
    PriorityQueue<InlineCandidate, std::vector<InlineCandidate>,
// Sample profile matching - fuzzy match.
class SampleProfileMatcher {
  // NOTE(review): some member declarations, `return` statements and closing
  // braces of this class appear truncated in this chunk; code kept exactly
  // as seen.
  SampleProfileReader &Reader;
  const PseudoProbeManager *ProbeManager;
  // Flattened (context-less) view of the profiles, built in the constructor
  // when FlattenProfileForMatching is on.
  SampleProfileMap FlattenedProfiles;
  // For each function, the matcher generates a map, of which each entry is a
  // mapping from the source location of current build to the source location in
  StringMap<LocToLocMap> FuncMappings;

  // Profile mismatching statistics.
  uint64_t TotalProfiledCallsites = 0;
  uint64_t NumMismatchedCallsites = 0;
  uint64_t MismatchedCallsiteSamples = 0;
  uint64_t TotalCallsiteSamples = 0;
  uint64_t TotalProfiledFunc = 0;
  uint64_t NumMismatchedFuncHash = 0;
  uint64_t MismatchedFuncHashSamples = 0;
  uint64_t TotalFuncHashSamples = 0;

  SampleProfileMatcher(Module &M, SampleProfileReader &Reader,
                       const PseudoProbeManager *ProbeManager)
      : M(M), Reader(Reader), ProbeManager(ProbeManager) {
    if (FlattenProfileForMatching) {
      ProfileConverter::flattenProfile(Reader.getProfiles(), FlattenedProfiles,
                                       FunctionSamples::ProfileIsCS);

  // Looks up F's flattened profile by canonical function name.
  FunctionSamples *getFlattenedSamplesFor(const Function &F) {
    StringRef CanonFName = FunctionSamples::getCanonicalFnName(F);
    auto It = FlattenedProfiles.find(CanonFName);
    if (It != FlattenedProfiles.end())
  void runOnFunction(const Function &F, const FunctionSamples &FS);
  void countProfileMismatches(
      const FunctionSamples &FS,
      const std::unordered_set<LineLocation, LineLocationHash>
          &MatchedCallsiteLocs,
      uint64_t &FuncMismatchedCallsites, uint64_t &FuncProfiledCallsites);

  // Returns (creating on first use) F's IR -> profile location remapping,
  // keyed by canonical function name.
  LocToLocMap &getIRToProfileLocationMap(const Function &F) {
    auto Ret = FuncMappings.try_emplace(
        FunctionSamples::getCanonicalFnName(F.getName()), LocToLocMap());
    return Ret.first->second;
  void distributeIRToProfileLocationMap();
  void distributeIRToProfileLocationMap(FunctionSamples &FS);
  void populateProfileCallsites(
      const FunctionSamples &FS,
      StringMap<std::set<LineLocation>> &CalleeToCallsitesMap);
  void runStaleProfileMatching(
      const std::map<LineLocation, StringRef> &IRLocations,
      StringMap<std::set<LineLocation>> &CalleeToCallsitesMap,
      LocToLocMap &IRToProfileLocationMap);
/// Sample profile pass.
///
/// This pass reads profile data from the file specified by
/// -sample-profile-file and annotates every affected function with the
/// profile information found in that file.
class SampleProfileLoader final : public SampleProfileLoaderBaseImpl<Function> {
  // NOTE(review): several lines of this class (access specifiers, the
  // constructor opener, some trailing parameters/initializers and closing
  // braces) appear truncated in this chunk; code kept exactly as seen.
      StringRef Name, StringRef RemapName, ThinOrFullLTOPhase LTOPhase,
      IntrusiveRefCntPtr<vfs::FileSystem> FS,
      std::function<AssumptionCache &(Function &)> GetAssumptionCache,
      std::function<TargetTransformInfo &(Function &)> GetTargetTransformInfo,
      std::function<const TargetLibraryInfo &(Function &)> GetTLI)
      : SampleProfileLoaderBaseImpl(std::string(Name), std::string(RemapName),
        GetAC(std::move(GetAssumptionCache)),
        GetTTI(std::move(GetTargetTransformInfo)), GetTLI(std::move(GetTLI)),
        AnnotatedPassName(AnnotateSampleProfileInlinePhase
                              ? llvm::AnnotateInlinePassName(InlineContext{
                                    LTOPhase, InlinePass::SampleProfileInliner})

  // Entry points used by the pass wrappers.
  bool doInitialization(Module &M, FunctionAnalysisManager *FAM = nullptr);
  bool runOnModule(Module &M, ModuleAnalysisManager *AM,
                   ProfileSummaryInfo *_PSI, LazyCallGraph &CG);

  bool runOnFunction(Function &F, ModuleAnalysisManager *AM);
  bool emitAnnotations(Function &F);
  ErrorOr<uint64_t> getInstWeight(const Instruction &I) override;
  const FunctionSamples *findCalleeFunctionSamples(const CallBase &I) const;
  const FunctionSamples *
  findFunctionSamples(const Instruction &I) const override;
  std::vector<const FunctionSamples *>
  findIndirectCallFunctionSamples(const Instruction &I, uint64_t &Sum) const;
  void findExternalInlineCandidate(CallBase *CB, const FunctionSamples *Samples,
                                   DenseSet<GlobalValue::GUID> &InlinedGUIDs,
                                   const StringMap<Function *> &SymbolMap,
  // Attempt to promote indirect call and also inline the promoted call
  bool tryPromoteAndInlineCandidate(
      Function &F, InlineCandidate &Candidate, uint64_t SumOrigin,
      uint64_t &Sum, SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);

  bool inlineHotFunctions(Function &F,
                          DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  std::optional<InlineCost> getExternalInlineAdvisorCost(CallBase &CB);
  bool getExternalInlineAdvisorShouldInline(CallBase &CB);
  InlineCost shouldInlineCandidate(InlineCandidate &Candidate);
  bool getInlineCandidate(InlineCandidate *NewCandidate, CallBase *CB);
  tryInlineCandidate(InlineCandidate &Candidate,
                     SmallVector<CallBase *, 8> *InlinedCallSites = nullptr);
  inlineHotFunctionsWithPriority(Function &F,
                                 DenseSet<GlobalValue::GUID> &InlinedGUIDs);
  // Inline cold/small functions in addition to hot ones
  bool shouldInlineColdCallee(CallBase &CallInst);
  void emitOptimizationRemarksForInlineCandidates(
      const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
  void promoteMergeNotInlinedContextSamples(
      MapVector<CallBase *, const FunctionSamples *> NonInlinedCallSites,
  std::vector<Function *> buildFunctionOrder(Module &M, LazyCallGraph &CG);
  std::unique_ptr<ProfiledCallGraph> buildProfiledCallGraph(Module &M);
  void generateMDProfMetadata(Function &F);

  /// Map from function name to Function *. Used to find the function from
  /// the function name. If the function name contains suffix, additional
  /// entry is added to map from the stripped name to the function if there
  /// is one-to-one mapping.
  StringMap<Function *> SymbolMap;

  std::function<AssumptionCache &(Function &)> GetAC;
  std::function<TargetTransformInfo &(Function &)> GetTTI;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;

  /// Profile tracker for different context.
  std::unique_ptr<SampleContextTracker> ContextTracker;

  /// Flag indicating which LTO/ThinLTO phase the pass is invoked in.
  ///
  /// We need to know the LTO phase because for example in ThinLTOPrelink
  /// phase, in annotation, we should not promote indirect calls. Instead,
  /// we will mark GUIDs that needs to be annotated to the function.
  const ThinOrFullLTOPhase LTOPhase;
  const std::string AnnotatedPassName;

  /// Profile Symbol list tells whether a function name appears in the binary
  /// used to generate the current profile.
  std::unique_ptr<ProfileSymbolList> PSL;

  /// Total number of samples collected in this profile.
  ///
  /// This is the sum of all the samples collected in all the functions executed
  uint64_t TotalCollectedSamples = 0;

  // Information recorded when we declined to inline a call site
  // because we have determined it is too cold is accumulated for
  // each callee function. Initially this is just the entry count.
  struct NotInlinedProfileInfo {
  DenseMap<Function *, NotInlinedProfileInfo> notInlinedCallInfo;

  // GUIDToFuncNameMap saves the mapping from GUID to the symbol name, for
  // all the function symbols defined or declared in current module.
  DenseMap<uint64_t, StringRef> GUIDToFuncNameMap;

  // All the Names used in FunctionSamples including outline function
  // names, inline instance names and call target names.
  StringSet<> NamesInProfile;

  // For symbol in profile symbol list, whether to regard their profiles
  // to be accurate. It is mainly decided by existence of profile symbol
  // list and -profile-accurate-for-symsinlist flag, but it can be
  // overridden by -profile-sample-accurate or profile-sample-accurate
  bool ProfAccForSymsInList;

  // External inline advisor used to replay inline decision from remarks.
  std::unique_ptr<InlineAdvisor> ExternalInlineAdvisor;

  // A helper to implement the sample profile matching algorithm.
  std::unique_ptr<SampleProfileMatcher> MatchingManager;

  const char *getAnnotatedRemarkPassName() const {
    return AnnotatedPassName.c_str();
} // end anonymous namespace
// Specializations of SampleProfileInference for IR Functions.
// NOTE(review): the `template <>` headers and closing braces of these
// definitions appear truncated in this chunk; code kept exactly as seen.
inline bool SampleProfileInference<Function>::isExit(const BasicBlock *BB) {
  // A block with no successors is a CFG exit.
  return succ_empty(BB);

inline void SampleProfileInference<Function>::findUnlikelyJumps(
    const std::vector<const BasicBlockT *> &BasicBlocks,
    BlockEdgeMap &Successors, FlowFunction &Func) {
  for (auto &Jump : Func.Jumps) {
    const auto *BB = BasicBlocks[Jump.Source];
    const auto *Succ = BasicBlocks[Jump.Target];
    const Instruction *TI = BB->getTerminator();
    // Check if a block ends with InvokeInst and mark non-taken branch unlikely.
    // In that case block Succ should be a landing pad
    if (Successors[BB].size() == 2 && Successors[BB].back() == Succ) {
      if (isa<InvokeInst>(TI)) {
        Jump.IsUnlikely = true;
    const Instruction *SuccTI = Succ->getTerminator();
    // Check if the target block contains UnreachableInst and mark it unlikely
    if (SuccTI->getNumSuccessors() == 0) {
      if (isa<UnreachableInst>(SuccTI)) {
        Jump.IsUnlikely = true;
// (Re)builds the dominator tree, post-dominator tree and loop info members
// used by the base-class propagation machinery.
// NOTE(review): the parameter line, the analysis-recalculation calls and the
// closing brace appear truncated in this chunk; code kept exactly as seen.
void SampleProfileLoaderBaseImpl<Function>::computeDominanceAndLoopInfo(
  DT.reset(new DominatorTree);
  PDT.reset(new PostDominatorTree(F));
  LI.reset(new LoopInfo);
// Returns the sample count for a single instruction: probe-based profiles
// delegate to getProbeWeight; otherwise the weight comes from the debug
// location via getInstWeightImpl.
// NOTE(review): a few guard/return lines appear truncated in this chunk;
// code kept exactly as seen.
ErrorOr<uint64_t> SampleProfileLoader::getInstWeight(const Instruction &Inst) {
  if (FunctionSamples::ProfileIsProbeBased)
    return getProbeWeight(Inst);

  const DebugLoc &DLoc = Inst.getDebugLoc();
    return std::error_code();

  // Ignore all intrinsics, phinodes and branch instructions.
  // Branch and phinodes instruction usually contains debug info from sources
  // outside of the residing basic block, thus we ignore them during annotation.
  if (isa<BranchInst>(Inst) || isa<IntrinsicInst>(Inst) || isa<PHINode>(Inst))
    return std::error_code();

  // For non-CS profile, if a direct call/invoke instruction is inlined in
  // profile (findCalleeFunctionSamples returns non-empty result), but not
  // inlined here, it means that the inlined callsite has no sample, thus the
  // call instruction should have 0 count.
  // For CS profile, the callsite count of previously inlined callees is
  // populated with the entry count of the callees.
  if (!FunctionSamples::ProfileIsCS)
    if (const auto *CB = dyn_cast<CallBase>(&Inst))
      if (!CB->isIndirectCall() && findCalleeFunctionSamples(*CB))
  return getInstWeightImpl(Inst);
/// Get the FunctionSamples for a call instruction.
///
/// The FunctionSamples of a call/invoke instruction \p Inst is the inlined
/// instance in which that call instruction is calling to. It contains
/// all samples that resides in the inlined instance. We first find the
/// inlined instance in which the call instruction is from, then we
/// traverse its children to find the callsite with the matching
/// location.
///
/// \param Inst Call/Invoke instruction to query.
///
/// \returns The FunctionSamples pointer to the inlined instance.
const FunctionSamples *
SampleProfileLoader::findCalleeFunctionSamples(const CallBase &Inst) const {
  // NOTE(review): null-check/early-return lines and the closing brace appear
  // truncated in this chunk; code kept exactly as seen.
  const DILocation *DIL = Inst.getDebugLoc();
  StringRef CalleeName;
  if (Function *Callee = Inst.getCalledFunction())
    CalleeName = Callee->getName();

  // CS profiles are answered by the context tracker rather than the nested
  // callsite-samples walk below.
  if (FunctionSamples::ProfileIsCS)
    return ContextTracker->getCalleeContextSamplesFor(Inst, CalleeName);

  const FunctionSamples *FS = findFunctionSamples(Inst);
  return FS->findFunctionSamplesAt(FunctionSamples::getCallSiteIdentifier(DIL),
                                   CalleeName, Reader->getRemapper());
/// Returns a vector of FunctionSamples that are the indirect call targets
/// of \p Inst. The vector is sorted by the total number of samples. Stores
/// the total call count of the indirect call in \p Sum.
std::vector<const FunctionSamples *>
SampleProfileLoader::findIndirectCallFunctionSamples(
    const Instruction &Inst, uint64_t &Sum) const {
  // NOTE(review): several guard/return/closing lines appear truncated in
  // this chunk; code kept exactly as seen.
  const DILocation *DIL = Inst.getDebugLoc();
  std::vector<const FunctionSamples *> R;

  // Sort by descending head-sample estimate; GUID is the deterministic
  // tie-breaker.
  auto FSCompare = [](const FunctionSamples *L, const FunctionSamples *R) {
    assert(L && R && "Expect non-null FunctionSamples");
    if (L->getHeadSamplesEstimate() != R->getHeadSamplesEstimate())
      return L->getHeadSamplesEstimate() > R->getHeadSamplesEstimate();
    return FunctionSamples::getGUID(L->getName()) <
           FunctionSamples::getGUID(R->getName());

  if (FunctionSamples::ProfileIsCS) {
        ContextTracker->getIndirectCalleeContextSamplesFor(DIL);
    if (CalleeSamples.empty())

    // For CSSPGO, we only use target context profile's entry count
    // as that already includes both inlined callee and non-inlined ones..
    for (const auto *const FS : CalleeSamples) {
      Sum += FS->getHeadSamplesEstimate();
    llvm::sort(R, FSCompare);

  // Non-CS path: accumulate from the callsite's target map and nested
  // inlinee samples.
  const FunctionSamples *FS = findFunctionSamples(Inst);

  auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
  auto T = FS->findCallTargetMapAt(CallSite);
    for (const auto &T_C : T.get())
  if (const FunctionSamplesMap *M = FS->findFunctionSamplesMapAt(CallSite)) {
    for (const auto &NameFS : *M) {
      Sum += NameFS.second.getHeadSamplesEstimate();
      R.push_back(&NameFS.second);
    llvm::sort(R, FSCompare);
// Finds the FunctionSamples enclosing \p Inst, caching one entry per
// DILocation in DILocation2SampleMap.
// NOTE(review): several guard/else/closing lines appear truncated in this
// chunk; code kept exactly as seen.
const FunctionSamples *
SampleProfileLoader::findFunctionSamples(const Instruction &Inst) const {
  // Probe-based profiles locate samples through the pseudo probe attached
  // to the instruction.
  if (FunctionSamples::ProfileIsProbeBased) {
    std::optional<PseudoProbe> Probe = extractProbe(Inst);
  const DILocation *DIL = Inst.getDebugLoc();
  // One cache slot per debug location; populated on first query.
  auto it = DILocation2SampleMap.try_emplace(DIL,nullptr);
    if (FunctionSamples::ProfileIsCS)
      it.first->second = ContextTracker->getContextSamplesFor(DIL);
          Samples->findFunctionSamples(DIL, Reader->getRemapper());
  return it.first->second;
/// Check whether the indirect call promotion history of \p Inst allows
/// the promotion for \p Candidate.
/// If the profile count for the promotion candidate \p Candidate is
/// NOMORE_ICP_MAGICNUM, it means \p Candidate has already been promoted
/// for \p Inst. If we already have at least MaxNumPromotions
/// NOMORE_ICP_MAGICNUM count values in the value profile of \p Inst, we
/// cannot promote for \p Inst anymore.
static bool doesHistoryAllowICP(const Instruction &Inst, StringRef Candidate) {
  // NOTE(review): several guard/return/closing lines appear truncated in
  // this chunk; code kept exactly as seen.
  uint32_t NumVals = 0;
  uint64_t TotalCount = 0;
  std::unique_ptr<InstrProfValueData[]> ValueData =
      std::make_unique<InstrProfValueData[]>(MaxNumPromotions);
      getValueProfDataFromInst(Inst, IPVK_IndirectCallTarget, MaxNumPromotions,
                               ValueData.get(), NumVals, TotalCount, true);
  // No valid value profile so no promoted targets have been recorded
  // before. Ok to do ICP.
  unsigned NumPromoted = 0;
  for (uint32_t I = 0; I < NumVals; I++) {
    if (ValueData[I].Count != NOMORE_ICP_MAGICNUM)
    // If the promotion candidate has NOMORE_ICP_MAGICNUM count in the
    // metadata, it means the candidate has been promoted for this
    if (ValueData[I].Value == Function::getGUID(Candidate))
    // If already have MaxNumPromotions promotion, don't do it anymore.
    if (NumPromoted == MaxNumPromotions)
866 /// Update indirect call target profile metadata for \p Inst.
867 /// Usually \p Sum is the sum of counts of all the targets for \p Inst.
868 /// If it is 0, it means updateIDTMetaData is used to mark a
869 /// certain target to be promoted already. If it is not zero,
870 /// we expect to use it to update the total count in the value profile.
// NOTE(review): some original lines are elided in this excerpt (branch
// structure around the Sum==0 / Sum!=0 cases is not fully visible).
872 updateIDTMetaData(Instruction &Inst,
873 const SmallVectorImpl<InstrProfValueData> &CallTargets,
875 // Bail out early if MaxNumPromotions is zero.
876 // This prevents allocating an array of zero length below.
878 // Note `updateIDTMetaData` is called in two places so check
879 // `MaxNumPromotions` inside it.
880 if (MaxNumPromotions == 0)
882 uint32_t NumVals = 0;
883 // OldSum is the existing total count in the value profile data.
885 std::unique_ptr<InstrProfValueData[]> ValueData =
886 std::make_unique<InstrProfValueData[]>(MaxNumPromotions);
888 getValueProfDataFromInst(Inst, IPVK_IndirectCallTarget, MaxNumPromotions,
889 ValueData.get(), NumVals, OldSum, true);
// ValueCountMap merges existing value-profile entries with CallTargets;
// keyed by target GUID, value is the (possibly sentinel) count.
891 DenseMap<uint64_t, uint64_t> ValueCountMap;
893 assert((CallTargets.size() == 1 &&
894 CallTargets[0].Count == NOMORE_ICP_MAGICNUM) &&
895 "If sum is 0, assume only one element in CallTargets "
896 "with count being NOMORE_ICP_MAGICNUM");
897 // Initialize ValueCountMap with existing value profile data.
899 for (uint32_t I = 0; I < NumVals; I++)
900 ValueCountMap[ValueData[I].Value] = ValueData[I].Count;
903 ValueCountMap.try_emplace(CallTargets[0].Value, CallTargets[0].Count);
904 // If the target already exists in value profile, decrease the total
905 // count OldSum and reset the target's count to NOMORE_ICP_MAGICNUM.
907 OldSum -= Pair.first->second;
908 Pair.first->second = NOMORE_ICP_MAGICNUM;
912 // Initialize ValueCountMap with existing NOMORE_ICP_MAGICNUM
913 // counts in the value profile.
915 for (uint32_t I = 0; I < NumVals; I++) {
916 if (ValueData[I].Count == NOMORE_ICP_MAGICNUM)
917 ValueCountMap[ValueData[I].Value] = ValueData[I].Count;
// Merge the incoming targets, preserving already-promoted (sentinel) entries.
921 for (const auto &Data : CallTargets) {
922 auto Pair = ValueCountMap.try_emplace(Data.Value, Data.Count);
925 // The target represented by Data.Value has already been promoted.
926 // Keep the count as NOMORE_ICP_MAGICNUM in the profile and decrease
927 // Sum by Data.Count.
928 assert(Sum >= Data.Count && "Sum should never be less than Data.Count");
933 SmallVector<InstrProfValueData, 8> NewCallTargets;
934 for (const auto &ValueCount : ValueCountMap) {
935 NewCallTargets.emplace_back(
936 InstrProfValueData{ValueCount.first, ValueCount.second});
// Deterministic ordering: descending count, ties broken by descending GUID.
939 llvm::sort(NewCallTargets,
940 [](const InstrProfValueData &L, const InstrProfValueData &R) {
941 if (L.Count != R.Count)
942 return L.Count > R.Count;
943 return L.Value > R.Value;
946 uint32_t MaxMDCount =
947 std::min(NewCallTargets.size(), static_cast<size_t>(MaxNumPromotions));
// Re-attach the merged value profile as metadata on the instruction; the
// triple getParent() walk reaches the enclosing Module.
948 annotateValueSite(*Inst.getParent()->getParent()->getParent(), Inst,
949 NewCallTargets, Sum, IPVK_IndirectCallTarget, MaxMDCount);
952 /// Attempt to promote indirect call and also inline the promoted call.
954 /// \param F Caller function.
955 /// \param Candidate ICP and inline candidate.
956 /// \param SumOrigin Original sum of target counts for indirect call before
957 /// promoting given candidate.
958 /// \param Sum Prorated sum of remaining target counts for indirect call
959 /// after promoting given candidate.
960 /// \param InlinedCallSite Output vector for new call sites exposed after
// \returns presumably true when promotion+inlining happened — the return
// statements are elided in this excerpt; TODO confirm against full source.
962 bool SampleProfileLoader::tryPromoteAndInlineCandidate(
963 Function &F, InlineCandidate &Candidate, uint64_t SumOrigin, uint64_t &Sum,
964 SmallVector<CallBase *, 8> *InlinedCallSite) {
965 // Bail out early if sample-loader inliner is disabled.
966 if (DisableSampleLoaderInlining)
969 // Bail out early if MaxNumPromotions is zero.
970 // This prevents allocating an array of zero length in callees below.
971 if (MaxNumPromotions == 0)
// Map the callee profile name back to a Function in this module.
973 auto CalleeFunctionName = Candidate.CalleeSamples->getFuncName();
974 auto R = SymbolMap.find(CalleeFunctionName);
975 if (R == SymbolMap.end() || !R->getValue())
978 auto &CI = *Candidate.CallInstr;
979 if (!doesHistoryAllowICP(CI, R->getValue()->getName()))
982 const char *Reason = "Callee function not available";
983 // R->getValue() != &F is to prevent promoting a recursive call.
984 // If it is a recursive call, we do not inline it as it could bloat
985 // the code exponentially. There are better ways to handle this, e.g.
986 // clone the caller first, and inline the cloned caller if it is
987 // recursive. As llvm does not inline recursive calls, we will
988 // simply ignore it instead of handling it explicitly.
989 if (!R->getValue()->isDeclaration() && R->getValue()->getSubprogram() &&
990 R->getValue()->hasFnAttribute("use-sample-profile") &&
991 R->getValue() != &F && isLegalToPromote(CI, R->getValue(), &Reason)) {
992 // For promoted target, set its value with NOMORE_ICP_MAGICNUM count
993 // in the value profile metadata so the target won't be promoted again.
994 SmallVector<InstrProfValueData, 1> SortedCallTargets = {InstrProfValueData{
995 Function::getGUID(R->getValue()->getName()), NOMORE_ICP_MAGICNUM}};
996 updateIDTMetaData(CI, SortedCallTargets, 0);
// Rewrite the indirect call into a guarded direct call to the hot target.
998 auto *DI = &pgo::promoteIndirectCall(
999 CI, R->getValue(), Candidate.CallsiteCount, Sum, false, ORE);
1001 Sum -= Candidate.CallsiteCount;
1002 // Do not prorate the indirect callsite distribution since the original
1003 // distribution will be used to scale down non-promoted profile target
1004 // counts later. By doing this we lose track of the real callsite count
1005 // for the leftover indirect callsite as a trade off for accurate call
1007 // TODO: Ideally we would have two separate factors, one for call site
1008 // counts and one is used to prorate call target counts.
1009 // Do not update the promoted direct callsite distribution at this
1010 // point since the original distribution combined with the callee profile
1011 // will be used to prorate callsites from the callee if inlined. Once not
1012 // inlined, the direct callsite distribution should be prorated so that
1013 // it will reflect the real callsite counts.
1014 Candidate.CallInstr = DI;
1015 if (isa<CallInst>(DI) || isa<InvokeInst>(DI)) {
1016 bool Inlined = tryInlineCandidate(Candidate, InlinedCallSite);
1018 // Prorate the direct callsite distribution so that it reflects real
1020 setProbeDistributionFactor(
1021 *DI, static_cast<float>(Candidate.CallsiteCount) / SumOrigin);
1027 LLVM_DEBUG(dbgs() << "\nFailed to promote indirect call to "
1028 << Candidate.CalleeSamples->getFuncName() << " because "
// Decide whether a cold callsite should still be inlined for size benefit:
// only when size-based inlining (ProfileSizeInline) is enabled and the
// analyzer's cost is within SampleColdCallSiteThreshold. NOTE(review): some
// original lines (early returns, remaining getInlineCost args) are elided.
1034 bool SampleProfileLoader::shouldInlineColdCallee(CallBase &CallInst) {
1035 if (!ProfileSizeInline)
1038 Function *Callee = CallInst.getCalledFunction();
1039 if (Callee == nullptr)
1042 InlineCost Cost = getInlineCost(CallInst, getInlineParams(), GetTTI(*Callee),
// Honor an unconditional "always inline" verdict from the cost analysis.
1048 if (Cost.isAlways())
1051 return Cost.getCost() <= SampleColdCallSiteThreshold;
// Emit an "InlineAttempt" analysis remark for each direct-call candidate,
// labelling it as selected for hotness or for size (\p Hot flag from the
// elided parameter list). Indirect candidates (no called function) are
// skipped.
1054 void SampleProfileLoader::emitOptimizationRemarksForInlineCandidates(
1055 const SmallVectorImpl<CallBase *> &Candidates, const Function &F,
1057 for (auto *I : Candidates) {
1058 Function *CalledFunction = I->getCalledFunction();
1059 if (CalledFunction) {
1060 ORE->emit(OptimizationRemarkAnalysis(getAnnotatedRemarkPassName(),
1061 "InlineAttempt", I->getDebugLoc(),
1063 << "previous inlining reattempted for "
1064 << (Hot ? "hotness: '" : "size: '")
1065 << ore::NV("Callee", CalledFunction) << "' into '"
1066 << ore::NV("Caller", &F) << "'");
// Collect GUIDs of out-of-module functions that the profile suggests should
// be imported (ThinLTO) so they can be inlined later. Walks nested inlinee
// profiles for AutoFDO, or the context trie for CSSPGO. NOTE(review): some
// original lines are elided in this excerpt.
1071 void SampleProfileLoader::findExternalInlineCandidate(
1072 CallBase *CB, const FunctionSamples *Samples,
1073 DenseSet<GlobalValue::GUID> &InlinedGUIDs,
1074 const StringMap<Function *> &SymbolMap, uint64_t Threshold) {
1076 // If ExternalInlineAdvisor(ReplayInlineAdvisor) wants to inline an external
1077 // function make sure it's imported
1078 if (CB && getExternalInlineAdvisorShouldInline(*CB)) {
1079 // Samples may not exist for replayed function, if so
1080 // just add the direct GUID and move on
1082 InlinedGUIDs.insert(
1083 FunctionSamples::getGUID(CB->getCalledFunction()->getName()));
1086 // Otherwise, drop the threshold to import everything that we can
1090 // In some rare cases, call instruction could be changed after being pushed
1091 // into inline candidate queue, this is because earlier inlining may expose
1092 // constant propagation which can change indirect call to direct call. When
1093 // this happens, we may fail to find matching function samples for the
1094 // candidate later, even if a match was found when the candidate was enqueued.
1098 // For AutoFDO profile, retrieve candidate profiles by walking over
1099 // the nested inlinee profiles.
1100 if (!FunctionSamples::ProfileIsCS) {
1101 Samples->findInlinedFunctions(InlinedGUIDs, SymbolMap, Threshold);
// CSSPGO path: breadth-first walk of the context trie starting at the
// caller's context node.
1105 ContextTrieNode *Caller = ContextTracker->getContextNodeForProfile(Samples);
1106 std::queue<ContextTrieNode *> CalleeList;
1107 CalleeList.push(Caller);
1108 while (!CalleeList.empty()) {
1109 ContextTrieNode *Node = CalleeList.front();
1111 FunctionSamples *CalleeSample = Node->getFunctionSamples();
1112 // For CSSPGO profile, retrieve candidate profile by walking over the
1113 // trie built for context profile. Note that also take call targets
1114 // even if callee doesn't have a corresponding context profile.
1118 // If pre-inliner decision is used, honor that for importing as well.
1120 UsePreInlinerDecision &&
1121 CalleeSample->getContext().hasAttribute(ContextShouldBeInlined);
1122 if (!PreInline && CalleeSample->getHeadSamplesEstimate() < Threshold)
1125 StringRef Name = CalleeSample->getFuncName();
1126 Function *Func = SymbolMap.lookup(Name);
1127 // Add to the import list only when it's defined out of module.
1128 if (!Func || Func->isDeclaration())
1129 InlinedGUIDs.insert(FunctionSamples::getGUID(CalleeSample->getName()));
1131 // Import hot CallTargets, which may not be available in IR because full
1132 // profile annotation cannot be done until backend compilation in ThinLTO.
1133 for (const auto &BS : CalleeSample->getBodySamples())
1134 for (const auto &TS : BS.second.getCallTargets())
1135 if (TS.getValue() > Threshold) {
1136 StringRef CalleeName = CalleeSample->getFuncName(TS.getKey());
1137 const Function *Callee = SymbolMap.lookup(CalleeName);
1138 if (!Callee || Callee->isDeclaration())
1139 InlinedGUIDs.insert(FunctionSamples::getGUID(TS.getKey()));
1142 // Import hot child context profiles associated with callees. Note that
1143 // this may have some overlap with the call target loop above, but doing
1144 // this based on child context profiles again effectively allows us to use
1145 // the max of entry count and call target count to determine importing.
1146 for (auto &Child : Node->getAllChildContext()) {
1147 ContextTrieNode *CalleeNode = &Child.second;
1148 CalleeList.push(CalleeNode);
1153 /// Iteratively inline hot callsites of a function.
1155 /// Iteratively traverse all callsites of the function \p F, so as to
1156 /// find out callsites with corresponding inline instances.
1158 /// For such callsites,
1159 /// - If it is hot enough, inline the callsites and adds callsites of the callee
1160 /// into the caller. If the call is an indirect call, first promote
1161 /// it to direct call. Each indirect call is limited with a single target.
1163 /// - If a callsite is not inlined, merge its profile into the outline
1164 /// version (if --sample-profile-merge-inlinee is true), or scale the
1165 /// counters of standalone function based on the profile of inlined
1166 /// instances (if --sample-profile-merge-inlinee is false).
1168 /// Later passes may consume the updated profiles.
1170 /// \param F function to perform iterative inlining.
1171 /// \param InlinedGUIDs a set to be updated to include all GUIDs that are
1172 /// inlined in the profiled binary.
1174 /// \returns True if there is any inline happened.
// NOTE(review): a number of original lines are elided in this excerpt
// (loop exits, some braces, and continue/break statements).
1175 bool SampleProfileLoader::inlineHotFunctions(
1176 Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
1177 // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
1178 // Profile symbol list is ignored when profile-sample-accurate is on.
1179 assert((!ProfAccForSymsInList ||
1180 (!ProfileSampleAccurate &&
1181 !F.hasFnAttribute("profile-sample-accurate"))) &&
1182 "ProfAccForSymsInList should be false when profile-sample-accurate "
// Track callsites that were considered but not inlined, so their profiles
// can be merged/scaled after the loop (non-CS profiles only; see below).
1185 MapVector<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites;
1186 bool Changed = false;
// Fixed-point iteration: inlining can expose new hot callsites, so repeat
// until a full pass makes no change.
1187 bool LocalChanged = true;
1188 while (LocalChanged) {
1189 LocalChanged = false;
1190 SmallVector<CallBase *, 10> CIS;
1191 for (auto &BB : F) {
1193 SmallVector<CallBase *, 10> AllCandidates;
1194 SmallVector<CallBase *, 10> ColdCandidates;
1195 for (auto &I : BB) {
1196 const FunctionSamples *FS = nullptr;
1197 if (auto *CB = dyn_cast<CallBase>(&I)) {
1198 if (!isa<IntrinsicInst>(I)) {
1199 if ((FS = findCalleeFunctionSamples(*CB))) {
1200 assert((!FunctionSamples::UseMD5 || FS->GUIDToFuncNameMap) &&
1201 "GUIDToFuncNameMap has to be populated");
1202 AllCandidates.push_back(CB);
1203 if (FS->getHeadSamplesEstimate() > 0 ||
1204 FunctionSamples::ProfileIsCS)
1205 LocalNotInlinedCallSites.insert({CB, FS});
1206 if (callsiteIsHot(FS, PSI, ProfAccForSymsInList))
1208 else if (shouldInlineColdCallee(*CB))
1209 ColdCandidates.push_back(CB);
1210 } else if (getExternalInlineAdvisorShouldInline(*CB)) {
1211 AllCandidates.push_back(CB);
1216 if (Hot || ExternalInlineAdvisor) {
1217 CIS.insert(CIS.begin(), AllCandidates.begin(), AllCandidates.end());
1218 emitOptimizationRemarksForInlineCandidates(AllCandidates, F, true);
1220 CIS.insert(CIS.begin(), ColdCandidates.begin(), ColdCandidates.end());
1221 emitOptimizationRemarksForInlineCandidates(ColdCandidates, F, false);
1224 for (CallBase *I : CIS) {
1225 Function *CalledFunction = I->getCalledFunction();
1226 InlineCandidate Candidate = {I, LocalNotInlinedCallSites.lookup(I),
1227 0 /* dummy count */,
1228 1.0 /* dummy distribution factor */};
1229 // Do not inline recursive calls.
1230 if (CalledFunction == &F)
1232 if (I->isIndirectCall()) {
// Indirect call: consider each profiled target (hottest first) for
// promotion followed by inlining.
1234 for (const auto *FS : findIndirectCallFunctionSamples(*I, Sum)) {
1235 uint64_t SumOrigin = Sum;
1236 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1237 findExternalInlineCandidate(I, FS, InlinedGUIDs, SymbolMap,
1238 PSI->getOrCompHotCountThreshold());
1241 if (!callsiteIsHot(FS, PSI, ProfAccForSymsInList))
1244 Candidate = {I, FS, FS->getHeadSamplesEstimate(), 1.0};
1245 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum)) {
1246 LocalNotInlinedCallSites.erase(I);
1247 LocalChanged = true;
1250 } else if (CalledFunction && CalledFunction->getSubprogram() &&
1251 !CalledFunction->isDeclaration()) {
1252 if (tryInlineCandidate(Candidate)) {
1253 LocalNotInlinedCallSites.erase(I);
1254 LocalChanged = true;
1256 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1257 findExternalInlineCandidate(I, findCalleeFunctionSamples(*I),
1258 InlinedGUIDs, SymbolMap,
1259 PSI->getOrCompHotCountThreshold());
1262 Changed |= LocalChanged;
1265 // For CS profile, profile for not inlined context will be merged when
1266 // base profile is being retrieved.
1267 if (!FunctionSamples::ProfileIsCS)
1268 promoteMergeNotInlinedContextSamples(LocalNotInlinedCallSites, F);
// Inline \p Candidate's callsite if the cost analysis allows it; optionally
// report newly exposed callsites via \p InlinedCallSites. NOTE(review): some
// original lines (return statements, a brace or two) are elided here.
1272 bool SampleProfileLoader::tryInlineCandidate(
1273 InlineCandidate &Candidate, SmallVector<CallBase *, 8> *InlinedCallSites) {
1274 // Do not attempt to inline a candidate if
1275 // --disable-sample-loader-inlining is true.
1276 if (DisableSampleLoaderInlining)
1279 CallBase &CB = *Candidate.CallInstr;
1280 Function *CalledFunction = CB.getCalledFunction();
1281 assert(CalledFunction && "Expect a callee with definition");
// Capture remark anchors before inlining erases the call instruction.
1282 DebugLoc DLoc = CB.getDebugLoc();
1283 BasicBlock *BB = CB.getParent();
1285 InlineCost Cost = shouldInlineCandidate(Candidate);
1286 if (Cost.isNever()) {
1287 ORE->emit(OptimizationRemarkAnalysis(getAnnotatedRemarkPassName(),
1288 "InlineFail", DLoc, BB)
1289 << "incompatible inlining");
// UpdateProfile is off: this pass manages profile updates itself (see the
// proration logic below and in the callers).
1296 InlineFunctionInfo IFI(GetAC);
1297 IFI.UpdateProfile = false;
1298 InlineResult IR = InlineFunction(CB, IFI,
1299 /*MergeAttributes=*/true);
1300 if (!IR.isSuccess())
1303 // The call to InlineFunction erases I, so we can't pass it here.
1304 emitInlinedIntoBasedOnCost(*ORE, DLoc, BB, *CalledFunction, *BB->getParent(),
1305 Cost, true, getAnnotatedRemarkPassName());
1307 // Now populate the list of newly exposed call sites.
1308 if (InlinedCallSites) {
1309 InlinedCallSites->clear();
1310 for (auto &I : IFI.InlinedCallSites)
1311 InlinedCallSites->push_back(I);
1314 if (FunctionSamples::ProfileIsCS)
1315 ContextTracker->markContextSamplesInlined(Candidate.CalleeSamples);
1318 // Prorate inlined probes for a duplicated inlining callsite which probably
1319 // has a distribution less than 100%. Samples for an inlinee should be
1320 // distributed among the copies of the original callsite based on each
1321 // callsite's distribution factor for counts accuracy. Note that an inlined
1322 // probe may come with its own distribution factor if it has been duplicated
1323 // in the inlinee body. The two factors are multiplied to reflect the
1324 // aggregation of duplication.
1325 if (Candidate.CallsiteDistribution < 1) {
1326 for (auto &I : IFI.InlinedCallSites) {
1327 if (std::optional<PseudoProbe> Probe = extractProbe(*I))
1328 setProbeDistributionFactor(*I, Probe->Factor *
1329 Candidate.CallsiteDistribution);
1331 NumDuplicatedInlinesite++;
// Build an InlineCandidate for callsite \p CB (parameter declared on an
// elided line): attach the callee's profile, and a callsite count scaled by
// the probe distribution factor when probes are present.
1337 bool SampleProfileLoader::getInlineCandidate(InlineCandidate *NewCandidate,
1339 assert(CB && "Expect non-null call instruction");
1341 if (isa<IntrinsicInst>(CB))
1344 // Find the callee's profile. For indirect call, find hottest target profile.
1345 const FunctionSamples *CalleeSamples = findCalleeFunctionSamples(*CB);
1346 // If ExternalInlineAdvisor wants to inline this site, do so even
1347 // if Samples are not present.
1348 if (!CalleeSamples && !getExternalInlineAdvisorShouldInline(*CB))
// Probe distribution factor scales the estimated count for cloned callsites.
1352 if (std::optional<PseudoProbe> Probe = extractProbe(*CB))
1353 Factor = Probe->Factor;
1355 uint64_t CallsiteCount =
1356 CalleeSamples ? CalleeSamples->getHeadSamplesEstimate() * Factor : 0;
1357 *NewCandidate = {CB, CalleeSamples, CallsiteCount, Factor};
// Consult the external (replay) inline advisor for \p CB. Returns an
// always/never InlineCost mirroring the advisor's recommendation, or
// (on an elided path) presumably std::nullopt when no advisor applies.
1361 std::optional<InlineCost>
1362 SampleProfileLoader::getExternalInlineAdvisorCost(CallBase &CB) {
1363 std::unique_ptr<InlineAdvice> Advice = nullptr;
1364 if (ExternalInlineAdvisor) {
1365 Advice = ExternalInlineAdvisor->getAdvice(CB);
1367 if (!Advice->isInliningRecommended()) {
// Record the decision on the advice object so replay bookkeeping stays
// consistent with what we return.
1368 Advice->recordUnattemptedInlining();
1369 return InlineCost::getNever("not previously inlined");
1371 Advice->recordInlining();
1372 return InlineCost::getAlways("previously inlined");
// Boolean convenience wrapper: true iff the external advisor produced a
// cost and that cost converts to true (i.e. inlining is recommended).
1379 bool SampleProfileLoader::getExternalInlineAdvisorShouldInline(CallBase &CB) {
1380 std::optional<InlineCost> Cost = getExternalInlineAdvisorCost(CB);
1381 return Cost ? !!*Cost : false;
// Compute the InlineCost decision for \p Candidate: external-advisor replay
// first, then legality via the call analyzer, then preinliner/threshold
// policy. (Return type is on an elided preceding line.)
1385 SampleProfileLoader::shouldInlineCandidate(InlineCandidate &Candidate) {
// External replay advisor, when present, overrides everything else.
1386 if (std::optional<InlineCost> ReplayCost =
1387 getExternalInlineAdvisorCost(*Candidate.CallInstr))
1389 // Adjust threshold based on call site hotness, only do this for callsite
1390 // prioritized inliner because otherwise cost-benefit check is done earlier.
1391 int SampleThreshold = SampleColdCallSiteThreshold;
1392 if (CallsitePrioritizedInline) {
1393 if (Candidate.CallsiteCount > PSI->getHotCountThreshold())
1394 SampleThreshold = SampleHotCallSiteThreshold;
1395 else if (!ProfileSizeInline)
1396 return InlineCost::getNever("cold callsite");
1399 Function *Callee = Candidate.CallInstr->getCalledFunction();
1400 assert(Callee && "Expect a definition for inline candidate of direct call");
1402 InlineParams Params = getInlineParams();
1403 // We will ignore the threshold from inline cost, so always get full cost.
1404 Params.ComputeFullInlineCost = true;
1405 Params.AllowRecursiveCall = AllowRecursiveInline;
1406 // Checks if there is anything in the reachable portion of the callee at
1407 // this callsite that makes this inlining potentially illegal. Need to
1408 // set ComputeFullInlineCost, otherwise getInlineCost may return early
1409 // when cost exceeds threshold without checking all IRs in the callee.
1410 // The actual cost does not matter because we only check isNever() to
1411 // see if it is legal to inline the callsite.
1412 InlineCost Cost = getInlineCost(*Candidate.CallInstr, Callee, Params,
1413 GetTTI(*Callee), GetAC, GetTLI)
1415 // Honor always inline and never inline from call analyzer
1416 if (Cost.isNever() || Cost.isAlways())
1419 // With CSSPGO, the preinliner in llvm-profgen can estimate global inline
1420 // decisions based on hotness as well as accurate function byte sizes for
1421 // given context using function/inlinee sizes from previous build. It
1422 // stores the decision in profile, and also adjust/merge context profile
1423 // aiming at better context-sensitive post-inline profile quality, assuming
1424 // all inline decision estimates are going to be honored by compiler. Here
1425 // we replay that inline decision under `sample-profile-use-preinliner`.
1426 // Note that we don't need to handle negative decision from preinliner as
1427 // context profile for not inlined calls are merged by preinliner already.
1428 if (UsePreInlinerDecision && Candidate.CalleeSamples) {
1429 // Once two node are merged due to promotion, we're losing some context
1430 // so the original context-sensitive preinliner decision should be ignored
1431 // for SyntheticContext.
1432 SampleContext &Context = Candidate.CalleeSamples->getContext();
1433 if (!Context.hasState(SyntheticContext) &&
1434 Context.hasAttribute(ContextShouldBeInlined))
1435 return InlineCost::getAlways("preinliner");
1438 // For old FDO inliner, we inline the call site as long as cost is not
1439 // "Never". The cost-benefit check is done earlier.
1440 if (!CallsitePrioritizedInline) {
1441 return InlineCost::get(Cost.getCost(), INT_MAX);
1444 // Otherwise only use the cost from call analyzer, but overwrite threshold
1445 // with Sample PGO threshold.
1446 return InlineCost::get(Cost.getCost(), SampleThreshold);
// Priority-queue (BFS) variant of the sample-profile inliner: callsites are
// ranked and processed hottest-first, with a size-growth cap. NOTE(review):
// a number of original lines are elided in this excerpt (queue pops,
// continue statements, some braces).
1449 bool SampleProfileLoader::inlineHotFunctionsWithPriority(
1450 Function &F, DenseSet<GlobalValue::GUID> &InlinedGUIDs) {
1451 // ProfAccForSymsInList is used in callsiteIsHot. The assertion makes sure
1452 // Profile symbol list is ignored when profile-sample-accurate is on.
1453 assert((!ProfAccForSymsInList ||
1454 (!ProfileSampleAccurate &&
1455 !F.hasFnAttribute("profile-sample-accurate"))) &&
1456 "ProfAccForSymsInList should be false when profile-sample-accurate "
1459 // Populating worklist with initial call sites from root inliner, along
1460 // with call site weights.
1461 CandidateQueue CQueue;
1462 InlineCandidate NewCandidate;
1463 for (auto &BB : F) {
1464 for (auto &I : BB) {
1465 auto *CB = dyn_cast<CallBase>(&I);
1468 if (getInlineCandidate(&NewCandidate, CB))
1469 CQueue.push(NewCandidate);
1473 // Cap the size growth from profile guided inlining. This is needed even
1474 // though cost of each inline candidate already accounts for callee size,
1475 // because with top-down inlining, we can grow inliner size significantly
1476 // with large number of smaller inlinees each pass the cost check.
1477 assert(ProfileInlineLimitMax >= ProfileInlineLimitMin &&
1478 "Max inline size limit should not be smaller than min inline size "
1480 unsigned SizeLimit = F.getInstructionCount() * ProfileInlineGrowthLimit;
1481 SizeLimit = std::min(SizeLimit, (unsigned)ProfileInlineLimitMax);
1482 SizeLimit = std::max(SizeLimit, (unsigned)ProfileInlineLimitMin);
// Replay advisor drives decisions itself, so lift the size cap entirely.
1483 if (ExternalInlineAdvisor)
1484 SizeLimit = std::numeric_limits<unsigned>::max();
1486 MapVector<CallBase *, const FunctionSamples *> LocalNotInlinedCallSites;
1488 // Perform iterative BFS call site prioritized inlining
1489 bool Changed = false;
1490 while (!CQueue.empty() && F.getInstructionCount() < SizeLimit) {
1491 InlineCandidate Candidate = CQueue.top();
1493 CallBase *I = Candidate.CallInstr;
1494 Function *CalledFunction = I->getCalledFunction();
// Do not inline recursive calls.
1496 if (CalledFunction == &F)
1498 if (I->isIndirectCall()) {
1500 auto CalleeSamples = findIndirectCallFunctionSamples(*I, Sum);
1501 uint64_t SumOrigin = Sum;
1502 Sum *= Candidate.CallsiteDistribution;
1503 unsigned ICPCount = 0;
1504 for (const auto *FS : CalleeSamples) {
1505 // TODO: Consider disabling pre-LTO ICP for MonoLTO as well
1506 if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1507 findExternalInlineCandidate(I, FS, InlinedGUIDs, SymbolMap,
1508 PSI->getOrCompHotCountThreshold());
1511 uint64_t EntryCountDistributed =
1512 FS->getHeadSamplesEstimate() * Candidate.CallsiteDistribution;
1513 // In addition to regular inline cost check, we also need to make sure
1514 // ICP isn't introducing excessive speculative checks even if individual
1515 // target looks beneficial to promote and inline. That means we should
1516 // only do ICP when there's a small number dominant targets.
1517 if (ICPCount >= ProfileICPRelativeHotnessSkip &&
1518 EntryCountDistributed * 100 < SumOrigin * ProfileICPRelativeHotness)
1520 // TODO: Fix CallAnalyzer to handle all indirect calls.
1521 // For indirect call, we don't run CallAnalyzer to get InlineCost
1522 // before actual inlining. This is because we could see two different
1523 // types from the same definition, which makes CallAnalyzer choke as
1524 // it's expecting matching parameter type on both caller and callee
1525 // side. See example from PR18962 for the triggering cases (the bug was
1526 // fixed, but we generate different types).
1527 if (!PSI->isHotCount(EntryCountDistributed))
1529 SmallVector<CallBase *, 8> InlinedCallSites;
1530 // Attach function profile for promoted indirect callee, and update
1531 // call site count for the promoted inline candidate too.
1532 Candidate = {I, FS, EntryCountDistributed,
1533 Candidate.CallsiteDistribution};
1534 if (tryPromoteAndInlineCandidate(F, Candidate, SumOrigin, Sum,
1535 &InlinedCallSites)) {
// Newly exposed callsites are fed back into the priority queue.
1536 for (auto *CB : InlinedCallSites) {
1537 if (getInlineCandidate(&NewCandidate, CB))
1538 CQueue.emplace(NewCandidate);
1542 } else if (!ContextTracker) {
1543 LocalNotInlinedCallSites.insert({I, FS});
1546 } else if (CalledFunction && CalledFunction->getSubprogram() &&
1547 !CalledFunction->isDeclaration()) {
1548 SmallVector<CallBase *, 8> InlinedCallSites;
1549 if (tryInlineCandidate(Candidate, &InlinedCallSites)) {
1550 for (auto *CB : InlinedCallSites) {
1551 if (getInlineCandidate(&NewCandidate, CB))
1552 CQueue.emplace(NewCandidate);
1555 } else if (!ContextTracker) {
1556 LocalNotInlinedCallSites.insert({I, Candidate.CalleeSamples});
1558 } else if (LTOPhase == ThinOrFullLTOPhase::ThinLTOPreLink) {
1559 findExternalInlineCandidate(I, findCalleeFunctionSamples(*I),
1560 InlinedGUIDs, SymbolMap,
1561 PSI->getOrCompHotCountThreshold());
// Record which limit stopped us, for statistics.
1565 if (!CQueue.empty()) {
1566 if (SizeLimit == (unsigned)ProfileInlineLimitMax)
1567 ++NumCSInlinedHitMaxLimit;
1568 else if (SizeLimit == (unsigned)ProfileInlineLimitMin)
1569 ++NumCSInlinedHitMinLimit;
1571 ++NumCSInlinedHitGrowthLimit;
1574 // For CS profile, profile for not inlined context will be merged when
1575 // base profile is being retrieved.
1576 if (!FunctionSamples::ProfileIsCS)
1577 promoteMergeNotInlinedContextSamples(LocalNotInlinedCallSites, F);
// For callsites that were candidates but not inlined, either merge their
// nested inlinee profiles into the standalone (outline) callee profile
// (--sample-profile-merge-inlinee) or accumulate entry counts for later
// scaling. NOTE(review): some original lines are elided in this excerpt.
1581 void SampleProfileLoader::promoteMergeNotInlinedContextSamples(
1582 MapVector<CallBase *, const FunctionSamples *> NonInlinedCallSites,
1583 const Function &F) {
1584 // Accumulate not inlined callsite information into notInlinedSamples
1585 for (const auto &Pair : NonInlinedCallSites) {
1586 CallBase *I = Pair.first;
1587 Function *Callee = I->getCalledFunction();
1588 if (!Callee || Callee->isDeclaration())
1592 OptimizationRemarkAnalysis(getAnnotatedRemarkPassName(), "NotInline",
1593 I->getDebugLoc(), I->getParent())
1594 << "previous inlining not repeated: '" << ore::NV("Callee", Callee)
1595 << "' into '" << ore::NV("Caller", &F) << "'");
1598 const FunctionSamples *FS = Pair.second;
// Skip profiles with no samples at all; nothing to merge or count.
1599 if (FS->getTotalSamples() == 0 && FS->getHeadSamplesEstimate() == 0) {
1603 // Do not merge a context that is already duplicated into the base profile.
1604 if (FS->getContext().hasAttribute(sampleprof::ContextDuplicatedIntoBase))
1607 if (ProfileMergeInlinee) {
1608 // A function call can be replicated by optimizations like callsite
1609 // splitting or jump threading and the replicates end up sharing the
1610 // sample nested callee profile instead of slicing the original
1611 // inlinee's profile. We want to do merge exactly once by filtering out
1612 // callee profiles with a non-zero head sample count.
1613 if (FS->getHeadSamples() == 0) {
1614 // Use entry samples as head samples during the merge, as inlinees
1615 // don't have head samples.
// const_cast is deliberate: FS points into the reader's profile storage
// and the merge must mutate it in place.
1616 const_cast<FunctionSamples *>(FS)->addHeadSamples(
1617 FS->getHeadSamplesEstimate());
1619 // Note that we have to do the merge right after processing function.
1620 // This allows OutlineFS's profile to be used for annotation during
1621 // top-down processing of functions' annotation.
1622 FunctionSamples *OutlineFS = Reader->getOrCreateSamplesFor(*Callee);
1623 OutlineFS->merge(*FS, 1);
1624 // Set outlined profile to be synthetic to not bias the inliner.
1625 OutlineFS->SetContextSynthetic();
// Non-merge path: just accumulate the callee's entry count for scaling.
1629 notInlinedCallInfo.try_emplace(Callee, NotInlinedProfileInfo{0});
1630 pair.first->second.entryCount += FS->getHeadSamplesEstimate();
1635 /// Returns the sorted CallTargetMap \p M by count in descending order.
// Converts each (name, count) target into an InstrProfValueData keyed by
// the name's GUID, preserving SortCallTargets' descending-count order.
1636 static SmallVector<InstrProfValueData, 2>
1637 GetSortedValueDataFromCallTargets(const SampleRecord::CallTargetMap &M) {
1638 SmallVector<InstrProfValueData, 2> R;
1639 for (const auto &I : SampleRecord::SortCallTargets(M)) {
1641 InstrProfValueData{FunctionSamples::getGUID(I.first), I.second});
// Generate MD_prof metadata for every branch instruction using the
// edge weights computed during propagation.
void SampleProfileLoader::generateMDProfMetadata(Function &F) {
  // Generate MD_prof metadata for every branch instruction using the
  // edge weights computed during propagation.
  LLVM_DEBUG(dbgs() << "\nPropagation complete. Setting branch weights\n");
  LLVMContext &Ctx = F.getContext();
  for (auto &BI : F) {
    BasicBlock *BB = &BI;
    if (BlockWeights[BB]) {
      for (auto &I : *BB) {
        // Only call-like instructions carry call-count / value-profile
        // annotations.
        if (!isa<CallInst>(I) && !isa<InvokeInst>(I))
        // Indirect call: annotate the hot target distribution so ICP can
        // promote the call later.
        if (!cast<CallBase>(I).getCalledFunction()) {
          const DebugLoc &DLoc = I.getDebugLoc();
          const DILocation *DIL = DLoc;
          const FunctionSamples *FS = findFunctionSamples(I);
          auto CallSite = FunctionSamples::getCallSiteIdentifier(DIL);
          auto T = FS->findCallTargetMapAt(CallSite);
          if (!T || T.get().empty())
          if (FunctionSamples::ProfileIsProbeBased) {
            // Prorate the callsite counts based on the pre-ICP distribution
            // factor to reflect what is already done to the callsite before
            // ICP, such as callsite cloning.
            if (std::optional<PseudoProbe> Probe = extractProbe(I)) {
              if (Probe->Factor < 1)
                T = SampleRecord::adjustCallTargets(T.get(), Probe->Factor);
          SmallVector<InstrProfValueData, 2> SortedCallTargets =
              GetSortedValueDataFromCallTargets(T.get());
          for (const auto &C : T.get())
          // With CSSPGO all indirect call targets are counted towards the
          // original indirect call site in the profile, including both
          // inlined and non-inlined targets.
          if (!FunctionSamples::ProfileIsCS) {
            if (const FunctionSamplesMap *M =
                    FS->findFunctionSamplesMapAt(CallSite)) {
              for (const auto &NameFS : *M)
                Sum += NameFS.second.getHeadSamplesEstimate();
            updateIDTMetaData(I, SortedCallTargets, Sum);
          else if (OverwriteExistingWeights)
            I.setMetadata(LLVMContext::MD_prof, nullptr);
        } else if (!isa<IntrinsicInst>(&I)) {
          // Direct call: use the enclosing block's weight as the call count.
          I.setMetadata(LLVMContext::MD_prof,
                        MDB.createBranchWeights(
                            {static_cast<uint32_t>(BlockWeights[BB])}));
    } else if (OverwriteExistingWeights || ProfileSampleBlockAccurate) {
      // Set profile metadata (possibly annotated by LTO prelink) to zero or
      // clear it for cold code.
      for (auto &I : *BB) {
        if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
          if (cast<CallBase>(I).isIndirectCall())
            I.setMetadata(LLVMContext::MD_prof, nullptr);
          I.setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(0));
    // Now annotate the terminator's outgoing edges. Single-successor and
    // non-branch terminators carry no useful branch weights.
    Instruction *TI = BB->getTerminator();
    if (TI->getNumSuccessors() == 1)
    if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI) &&
        !isa<IndirectBrInst>(TI))
    DebugLoc BranchLoc = TI->getDebugLoc();
    LLVM_DEBUG(dbgs() << "\nGetting weights for branch at line "
                      << ((BranchLoc) ? Twine(BranchLoc.getLine())
                                      : Twine("<UNKNOWN LOCATION>"))
    SmallVector<uint32_t, 4> Weights;
    uint32_t MaxWeight = 0;
    // NOTE(review): MaxDestInst is left uninitialized here; it is only read
    // below under MaxWeight > 0, which implies an assignment in the edge loop
    // happened first — confirm before reordering this logic.
    Instruction *MaxDestInst;
    // Since profi treats multiple edges (multiway branches) as a single edge,
    // we need to distribute the computed weight among the branches. We do
    // this by evenly splitting the edge weight among destinations.
    DenseMap<const BasicBlock *, uint64_t> EdgeMultiplicity;
    std::vector<uint64_t> EdgeIndex;
    if (SampleProfileUseProfi) {
      EdgeIndex.resize(TI->getNumSuccessors());
      for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
        const BasicBlock *Succ = TI->getSuccessor(I);
        EdgeIndex[I] = EdgeMultiplicity[Succ];
        EdgeMultiplicity[Succ]++;
    for (unsigned I = 0; I < TI->getNumSuccessors(); ++I) {
      BasicBlock *Succ = TI->getSuccessor(I);
      Edge E = std::make_pair(BB, Succ);
      uint64_t Weight = EdgeWeights[E];
      LLVM_DEBUG(dbgs() << "\t"; printEdgeWeight(dbgs(), E));
      // Use uint32_t saturated arithmetic to adjust the incoming weights,
      // if needed. Sample counts in profiles are 64-bit unsigned values,
      // but internally branch weights are expressed as 32-bit values.
      if (Weight > std::numeric_limits<uint32_t>::max()) {
        LLVM_DEBUG(dbgs() << " (saturated due to uint32_t overflow)");
        Weight = std::numeric_limits<uint32_t>::max();
      if (!SampleProfileUseProfi) {
        // Weight is added by one to avoid propagation errors introduced by
        // zero weights.
        Weights.push_back(static_cast<uint32_t>(Weight + 1));
        // Profi creates proper weights that do not require "+1" adjustments but
        // we evenly split the weight among branches with the same destination.
        uint64_t W = Weight / EdgeMultiplicity[Succ];
        // Rounding up, if needed, so that first branches are hotter.
        if (EdgeIndex[I] < Weight % EdgeMultiplicity[Succ])
        Weights.push_back(static_cast<uint32_t>(W));
      // Track the hottest destination for the optimization remark below.
      if (Weight > MaxWeight) {
        MaxDestInst = Succ->getFirstNonPHIOrDbgOrLifetime();
    misexpect::checkExpectAnnotations(*TI, Weights, /*IsFrontend=*/false);
    uint64_t TempWeight;
    // Only set weights if there is at least one non-zero weight.
    // In any other case, let the analyzer set weights.
    // Do not set weights if the weights are present unless under
    // OverwriteExistingWeights. In ThinLTO, the profile annotation is done
    // twice. If the first annotation already set the weights, the second pass
    // does not need to set it. With OverwriteExistingWeights, Blocks with zero
    // weight should have their existing metadata (possibly annotated by LTO
    // prelink) cleared.
    if (MaxWeight > 0 &&
        (!TI->extractProfTotalWeight(TempWeight) || OverwriteExistingWeights)) {
      LLVM_DEBUG(dbgs() << "SUCCESS. Found non-zero weights.\n");
      TI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
      return OptimizationRemark(DEBUG_TYPE, "PopularDest", MaxDestInst)
             << "most popular destination for conditional branches at "
             << ore::NV("CondBranchesLoc", BranchLoc);
    if (OverwriteExistingWeights) {
      TI->setMetadata(LLVMContext::MD_prof, nullptr);
      LLVM_DEBUG(dbgs() << "CLEARED. All branch weights are zero.\n");
      LLVM_DEBUG(dbgs() << "SKIPPED. All branch weights are zero.\n");
/// Once all the branch weights are computed, we emit the MD_prof
/// metadata on BB using the computed values for each of its branches.
/// \param F The function to query.
/// \returns true if \p F was modified. Returns false, otherwise.
bool SampleProfileLoader::emitAnnotations(Function &F) {
  bool Changed = false;
  // For probe-based profiles, validate the profile against the function's
  // CFG checksum before using it; a mismatch means the profile is stale.
  if (FunctionSamples::ProfileIsProbeBased) {
    if (!ProbeManager->profileIsValid(F, *Samples)) {
      dbgs() << "Profile is invalid due to CFG mismatch for Function "
             << F.getName() << "\n");
      ++NumMismatchedProfile;
      // Without stale-profile salvaging there is nothing to recover from a
      // mismatched profile.
      if (!SalvageStaleProfile)
    ++NumMatchedProfile;
  if (getFunctionLoc(F) == 0)
  LLVM_DEBUG(dbgs() << "Line number for the first instruction in "
                    << F.getName() << ": " << getFunctionLoc(F) << "\n");
  // Inline hot callees first, then compute/propagate weights over the
  // post-inlining CFG and attach the MD_prof metadata.
  DenseSet<GlobalValue::GUID> InlinedGUIDs;
  if (CallsitePrioritizedInline)
    Changed |= inlineHotFunctionsWithPriority(F, InlinedGUIDs);
    Changed |= inlineHotFunctions(F, InlinedGUIDs);
  Changed |= computeAndPropagateWeights(F, InlinedGUIDs);
  generateMDProfMetadata(F);
  emitCoverageRemarks(F);
/// Build a call graph from profile data and register every module function
/// that carries the "use-sample-profile" attribute, so that functions missing
/// from the profile still participate in ordering.
std::unique_ptr<ProfiledCallGraph>
SampleProfileLoader::buildProfiledCallGraph(Module &M) {
  std::unique_ptr<ProfiledCallGraph> ProfiledCG;
  // For CSSPGO the context tracker owns the contextual profiles; otherwise
  // the graph is built directly from the reader's profile map.
  if (FunctionSamples::ProfileIsCS)
    ProfiledCG = std::make_unique<ProfiledCallGraph>(*ContextTracker);
    ProfiledCG = std::make_unique<ProfiledCallGraph>(Reader->getProfiles());
  // Add all functions into the profiled call graph even if they are not in
  // the profile. This makes sure functions missing from the profile still
  // gets a chance to be processed.
  for (Function &F : M) {
    if (F.isDeclaration() || !F.hasFnAttribute("use-sample-profile"))
    ProfiledCG->addProfiledFunction(FunctionSamples::getCanonicalFnName(F));
/// Compute the order in which functions are annotated. Bottom-up (static call
/// graph post-order) by default; top-down — optionally augmented with the
/// profiled call graph — when ProfileTopDownLoad is enabled.
std::vector<Function *>
SampleProfileLoader::buildFunctionOrder(Module &M, LazyCallGraph &CG) {
  std::vector<Function *> FunctionOrderList;
  FunctionOrderList.reserve(M.size());
  if (!ProfileTopDownLoad && UseProfiledCallGraph)
    errs() << "WARNING: -use-profiled-call-graph ignored, should be used "
              "together with -sample-profile-top-down-load.\n";
  if (!ProfileTopDownLoad) {
    if (ProfileMergeInlinee) {
      // Disable ProfileMergeInlinee if profile is not loaded in top down order,
      // because the profile for a function may be used for the profile
      // annotation of its outline copy before the profile merging of its
      // non-inlined inline instances, and that is not the way how
      // ProfileMergeInlinee is supposed to work.
      ProfileMergeInlinee = false;
    // Module order is the (reversed) bottom-up order in this mode.
    for (Function &F : M)
      if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
        FunctionOrderList.push_back(&F);
    return FunctionOrderList;
  if (UseProfiledCallGraph || (FunctionSamples::ProfileIsCS &&
                               !UseProfiledCallGraph.getNumOccurrences())) {
    // Use profiled call edges to augment the top-down order. There are cases
    // that the top-down order computed based on the static call graph doesn't
    // reflect real execution order. For example
    // 1. Incomplete static call graph due to unknown indirect call targets.
    //    Adjusting the order by considering indirect call edges from the
    //    profile can enable the inlining of indirect call targets by allowing
    //    the caller processed before them.
    // 2. Mutual call edges in an SCC. The static processing order computed for
    //    an SCC may not reflect the call contexts in the context-sensitive
    //    profile, thus may cause potential inlining to be overlooked. The
    //    function order in one SCC is being adjusted to a top-down order based
    //    on the profile to favor more inlining. This is only a problem with CS
    // 3. Transitive indirect call edges due to inlining. When a callee function
    //    (say B) is inlined into a caller function (say A) in LTO prelink,
    //    every call edge originated from the callee B will be transferred to
    //    the caller A. If any transferred edge (say A->C) is indirect, the
    //    original profiled indirect edge B->C, even if considered, would not
    //    enforce a top-down order from the caller A to the potential indirect
    //    call target C in LTO postlink since the inlined callee B is gone from
    //    the static call graph.
    // 4. #3 can happen even for direct call targets, due to functions defined
    //    in header files. A header function (say A), when included into source
    //    files, is defined multiple times but only one definition survives due
    //    to ODR. Therefore, the LTO prelink inlining done on those dropped
    //    definitions can be useless based on a local file scope. More
    //    importantly, the inlinee (say B), once fully inlined to a
    //    to-be-dropped A, will have no profile to consume when its outlined
    //    version is compiled. This can lead to a profile-less prelink
    //    compilation for the outlined version of B which may be called from
    //    external modules. While this isn't easy to fix, we rely on the
    //    postlink AutoFDO pipeline to optimize B. Since the survived copy of
    //    the A can be inlined in its local scope in prelink, it may not exist
    //    in the merged IR in postlink, and we'll need the profiled call edges
    //    to enforce a top-down order for the rest of the functions.
    // Considering those cases, a profiled call graph completely independent of
    // the static call graph is constructed based on profile data, where
    // function objects are not even needed to handle case #3 and case #4.
    // Note that static callgraph edges are completely ignored since they
    // can be conflicting with profiled edges for cyclic SCCs and may result in
    // an SCC order incompatible with profile-defined one. Using strictly
    // profile order ensures a maximum inlining experience. On the other hand,
    // static call edges are not so important when they don't correspond to a
    // context in the profile.
    std::unique_ptr<ProfiledCallGraph> ProfiledCG = buildProfiledCallGraph(M);
    scc_iterator<ProfiledCallGraph *> CGI = scc_begin(ProfiledCG.get());
    while (!CGI.isAtEnd()) {
      if (SortProfiledSCC) {
        // Sort nodes in one SCC based on callsite hotness.
        scc_member_iterator<ProfiledCallGraph *> SI(*CGI);
      for (auto *Node : Range) {
        Function *F = SymbolMap.lookup(Node->Name);
        if (F && !F->isDeclaration() && F->hasFnAttribute("use-sample-profile"))
          FunctionOrderList.push_back(F);
    // Fallback: static call graph post-order over ref-SCCs.
    for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs()) {
      for (LazyCallGraph::SCC &C : RC) {
        for (LazyCallGraph::Node &N : C) {
          Function &F = N.getFunction();
          if (!F.isDeclaration() && F.hasFnAttribute("use-sample-profile"))
            FunctionOrderList.push_back(&F);
  // The list was accumulated bottom-up; reverse it to obtain top-down order.
  std::reverse(FunctionOrderList.begin(), FunctionOrderList.end());
    dbgs() << "Function processing order:\n";
    for (auto F : FunctionOrderList) {
      dbgs() << F->getName() << "\n";
  return FunctionOrderList;
/// Create the profile reader, load the profile, and configure pass-wide
/// defaults (profi, priority inliner, stale-profile salvaging, etc.) based on
/// the kind of profile that was read.
/// \returns false if the profile cannot be opened or read (after emitting a
/// diagnostic), true on success.
bool SampleProfileLoader::doInitialization(Module &M,
                                           FunctionAnalysisManager *FAM) {
  auto &Ctx = M.getContext();
  auto ReaderOrErr = SampleProfileReader::create(
      Filename, Ctx, *FS, FSDiscriminatorPass::Base, RemappingFilename);
  if (std::error_code EC = ReaderOrErr.getError()) {
    std::string Msg = "Could not open profile: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
  Reader = std::move(ReaderOrErr.get());
  Reader->setSkipFlatProf(LTOPhase == ThinOrFullLTOPhase::ThinLTOPostLink);
  // set module before reading the profile so reader may be able to only
  // read the function profiles which are used by the current module.
  Reader->setModule(&M);
  if (std::error_code EC = Reader->read()) {
    std::string Msg = "profile reading failed: " + EC.message();
    Ctx.diagnose(DiagnosticInfoSampleProfile(Filename, Msg));
  PSL = Reader->getProfileSymbolList();
  // While profile-sample-accurate is on, ignore symbol list.
  ProfAccForSymsInList =
      ProfileAccurateForSymsInList && PSL && !ProfileSampleAccurate;
  if (ProfAccForSymsInList) {
    NamesInProfile.clear();
    if (auto NameTable = Reader->getNameTable())
      NamesInProfile.insert(NameTable->begin(), NameTable->end());
    CoverageTracker.setProfAccForSymsInList(true);
  // Optionally drive inlining decisions from a replay file instead of the
  // profile-based heuristics.
  if (FAM && !ProfileInlineReplayFile.empty()) {
    ExternalInlineAdvisor = getReplayInlineAdvisor(
        M, *FAM, Ctx, /*OriginalAdvisor=*/nullptr,
        ReplayInlinerSettings{ProfileInlineReplayFile,
                              ProfileInlineReplayScope,
                              ProfileInlineReplayFallback,
                              {ProfileInlineReplayFormat}},
        /*EmitRemarks=*/false, InlineContext{LTOPhase, InlinePass::ReplaySampleProfileInliner});
  // Apply tweaks if context-sensitive or probe-based profile is available.
  if (Reader->profileIsCS() || Reader->profileIsPreInlined() ||
      Reader->profileIsProbeBased()) {
    if (!UseIterativeBFIInference.getNumOccurrences())
      UseIterativeBFIInference = true;
    if (!SampleProfileUseProfi.getNumOccurrences())
      SampleProfileUseProfi = true;
    if (!EnableExtTspBlockPlacement.getNumOccurrences())
      EnableExtTspBlockPlacement = true;
    // Enable priority-base inliner and size inline by default for CSSPGO.
    if (!ProfileSizeInline.getNumOccurrences())
      ProfileSizeInline = true;
    if (!CallsitePrioritizedInline.getNumOccurrences())
      CallsitePrioritizedInline = true;
    // For CSSPGO, we also allow recursive inline to best use context profile.
    if (!AllowRecursiveInline.getNumOccurrences())
      AllowRecursiveInline = true;
  if (Reader->profileIsPreInlined()) {
    if (!UsePreInlinerDecision.getNumOccurrences())
      UsePreInlinerDecision = true;
  // Enable stale profile matching by default for probe-based profile.
  // Currently the matching relies on if the checksum mismatch is detected,
  // which is currently only available for pseudo-probe mode. Removing the
  // checksum check could cause regressions for some cases, so further tuning
  // might be needed if we want to enable it for all cases.
  if (Reader->profileIsProbeBased() &&
      !SalvageStaleProfile.getNumOccurrences()) {
    SalvageStaleProfile = true;
  if (!Reader->profileIsCS()) {
    // Non-CS profile should be fine without a function size budget for the
    // inliner since the contexts in the profile are either all from inlining
    // in the previous build or pre-computed by the preinliner with a size
    // cap, thus they are bounded.
    if (!ProfileInlineLimitMin.getNumOccurrences())
      ProfileInlineLimitMin = std::numeric_limits<unsigned>::max();
    if (!ProfileInlineLimitMax.getNumOccurrences())
      ProfileInlineLimitMax = std::numeric_limits<unsigned>::max();
  if (Reader->profileIsCS()) {
    // Tracker for profiles under different context
    ContextTracker = std::make_unique<SampleContextTracker>(
        Reader->getProfiles(), &GUIDToFuncNameMap);
  // Load pseudo probe descriptors for probe-based function samples.
  if (Reader->profileIsProbeBased()) {
    ProbeManager = std::make_unique<PseudoProbeManager>(M);
    if (!ProbeManager->moduleIsProbed(M)) {
      "Pseudo-probe-based profile requires SampleProfileProbePass";
      Ctx.diagnose(DiagnosticInfoSampleProfile(M.getModuleIdentifier(), Msg,
  // The matcher is only needed when staleness is reported, persisted, or
  // salvaged.
  if (ReportProfileStaleness || PersistProfileStaleness ||
      SalvageStaleProfile) {
    std::make_unique<SampleProfileMatcher>(M, *Reader, ProbeManager.get());
// Accumulate callsite-mismatch statistics for \p FS against the callsite
// locations actually present in the IR (\p MatchedCallsiteLocs). Updates the
// per-function out-params as well as the matcher-wide sample counters.
void SampleProfileMatcher::countProfileMismatches(
    const FunctionSamples &FS,
    const std::unordered_set<LineLocation, LineLocationHash>
        &MatchedCallsiteLocs,
    uint64_t &FuncMismatchedCallsites, uint64_t &FuncProfiledCallsites) {
  // Offsets with bit 15 set are skipped — presumably reserved/synthetic
  // locations; TODO confirm against LineLocation encoding.
  auto isInvalidLineOffset = [](uint32_t LineOffset) {
    return LineOffset & 0x8000;
  // Check if there are any callsites in the profile that do not match to any
  // IR callsites, those callsite samples will be discarded.
  for (auto &I : FS.getBodySamples()) {
    const LineLocation &Loc = I.first;
    if (isInvalidLineOffset(Loc.LineOffset))
    uint64_t Count = I.second.getSamples();
    if (!I.second.getCallTargets().empty()) {
      TotalCallsiteSamples += Count;
      FuncProfiledCallsites++;
      if (!MatchedCallsiteLocs.count(Loc)) {
        MismatchedCallsiteSamples += Count;
        FuncMismatchedCallsites++;
  // Inlined callsites: weight each location by the sum of its inlinees'
  // estimated head samples.
  for (auto &I : FS.getCallsiteSamples()) {
    const LineLocation &Loc = I.first;
    if (isInvalidLineOffset(Loc.LineOffset))
    for (auto &FM : I.second) {
      Count += FM.second.getHeadSamplesEstimate();
    TotalCallsiteSamples += Count;
    FuncProfiledCallsites++;
    if (!MatchedCallsiteLocs.count(Loc)) {
      MismatchedCallsiteSamples += Count;
      FuncMismatchedCallsites++;
// Populate the anchors(direct callee name) from profile.
void SampleProfileMatcher::populateProfileCallsites(
    const FunctionSamples &FS,
    StringMap<std::set<LineLocation>> &CalleeToCallsitesMap) {
  // Body samples: a callsite with exactly one recorded call target is treated
  // as a direct call and its callee name becomes an anchor.
  for (const auto &I : FS.getBodySamples()) {
    const auto &Loc = I.first;
    const auto &CTM = I.second.getCallTargets();
    // Filter out possible indirect calls, use direct callee name as anchor.
    if (CTM.size() == 1) {
      StringRef CalleeName = CTM.begin()->first();
      const auto &Candidates = CalleeToCallsitesMap.try_emplace(
          CalleeName, std::set<LineLocation>());
      Candidates.first->second.insert(Loc);
  // Inlined callsites: a single inlinee likewise anchors on its profile name.
  for (const auto &I : FS.getCallsiteSamples()) {
    const LineLocation &Loc = I.first;
    const auto &CalleeMap = I.second;
    // Filter out possible indirect calls, use direct callee name as anchor.
    if (CalleeMap.size() == 1) {
      StringRef CalleeName = CalleeMap.begin()->first;
      const auto &Candidates = CalleeToCallsitesMap.try_emplace(
          CalleeName, std::set<LineLocation>());
      Candidates.first->second.insert(Loc);
// Call target name anchor based profile fuzzy matching.
// For IR locations, the anchor is the callee name of direct callsite; For
// profile locations, it's the call target name for BodySamples or inlinee's
// profile name for CallsiteSamples.
// Matching heuristic:
// First match all the anchors in lexical order, then split the non-anchor
// locations between the two anchors evenly, first half are matched based on the
// start anchor, second half are matched based on the end anchor.
// For example, given:
// IR locations: [1, 2(foo), 3, 5, 6(bar), 7]
// Profile locations: [1, 2, 3(foo), 4, 7, 8(bar), 9]
// The matching gives:
// [1, 2(foo), 3, 5, 6(bar), 7]
// [1, 2, 3(foo), 4, 7, 8(bar), 9]
// The output mapping: [2->3, 3->4, 5->7, 6->8, 7->9].
void SampleProfileMatcher::runStaleProfileMatching(
    const std::map<LineLocation, StringRef> &IRLocations,
    StringMap<std::set<LineLocation>> &CalleeToCallsitesMap,
    LocToLocMap &IRToProfileLocationMap) {
  assert(IRToProfileLocationMap.empty() &&
         "Run stale profile matching only once per function");
  auto InsertMatching = [&](const LineLocation &From, const LineLocation &To) {
    // Skip the unchanged location mapping to save memory.
    IRToProfileLocationMap.insert({From, To});
  // Use function's beginning location as the initial anchor.
  int32_t LocationDelta = 0;
  SmallVector<LineLocation> LastMatchedNonAnchors;
  for (const auto &IR : IRLocations) {
    const auto &Loc = IR.first;
    StringRef CalleeName = IR.second;
    bool IsMatchedAnchor = false;
    // Match the anchor location in lexical order.
    if (!CalleeName.empty()) {
      auto ProfileAnchors = CalleeToCallsitesMap.find(CalleeName);
      if (ProfileAnchors != CalleeToCallsitesMap.end() &&
          !ProfileAnchors->second.empty()) {
        // Consume the lexically-first remaining profile anchor for this
        // callee so each profile location is matched at most once.
        auto CI = ProfileAnchors->second.begin();
        const auto Candidate = *CI;
        ProfileAnchors->second.erase(CI);
        InsertMatching(Loc, Candidate);
        LLVM_DEBUG(dbgs() << "Callsite with callee:" << CalleeName
                          << " is matched from " << Loc << " to " << Candidate
        LocationDelta = Candidate.LineOffset - Loc.LineOffset;
        // Match backwards for non-anchor locations.
        // The locations in LastMatchedNonAnchors have been matched forwards
        // based on the previous anchor, split it evenly and overwrite the
        // second half based on the current anchor.
        for (size_t I = (LastMatchedNonAnchors.size() + 1) / 2;
             I < LastMatchedNonAnchors.size(); I++) {
          const auto &L = LastMatchedNonAnchors[I];
          uint32_t CandidateLineOffset = L.LineOffset + LocationDelta;
          LineLocation Candidate(CandidateLineOffset, L.Discriminator);
          InsertMatching(L, Candidate);
          LLVM_DEBUG(dbgs() << "Location is rematched backwards from " << L
                            << " to " << Candidate << "\n");
        IsMatchedAnchor = true;
        LastMatchedNonAnchors.clear();
    // Match forwards for non-anchor locations.
    if (!IsMatchedAnchor) {
      uint32_t CandidateLineOffset = Loc.LineOffset + LocationDelta;
      LineLocation Candidate(CandidateLineOffset, Loc.Discriminator);
      InsertMatching(Loc, Candidate);
      LLVM_DEBUG(dbgs() << "Location is matched from " << Loc << " to "
                        << Candidate << "\n");
      LastMatchedNonAnchors.emplace_back(Loc);
// Collect staleness metrics for \p F against its profile \p FS, and — when
// the function checksum mismatches and salvaging is enabled — run the fuzzy
// location matching to recover a usable mapping.
void SampleProfileMatcher::runOnFunction(const Function &F,
                                         const FunctionSamples &FS) {
  bool IsFuncHashMismatch = false;
  // Probe-based profiles carry a function checksum; a mismatch means the
  // whole profile for this function is stale.
  if (FunctionSamples::ProfileIsProbeBased) {
    uint64_t Count = FS.getTotalSamples();
    TotalFuncHashSamples += Count;
    TotalProfiledFunc++;
    if (!ProbeManager->profileIsValid(F, FS)) {
      MismatchedFuncHashSamples += Count;
      NumMismatchedFuncHash++;
      IsFuncHashMismatch = true;
  std::unordered_set<LineLocation, LineLocationHash> MatchedCallsiteLocs;
  // The value of the map is the name of direct callsite and use empty StringRef
  // for non-direct-call site.
  std::map<LineLocation, StringRef> IRLocations;
  // Extract profile matching anchors and profile mismatch metrics in the IR.
  for (auto &BB : F) {
    for (auto &I : BB) {
      // TODO: Support line-number based location(AutoFDO).
      if (FunctionSamples::ProfileIsProbeBased && isa<PseudoProbeInst>(&I)) {
        if (std::optional<PseudoProbe> Probe = extractProbe(I))
          IRLocations.emplace(LineLocation(Probe->Id, 0), StringRef());
      if (!isa<CallBase>(&I) || isa<IntrinsicInst>(&I))
      const auto *CB = dyn_cast<CallBase>(&I);
      if (auto &DLoc = I.getDebugLoc()) {
        LineLocation IRCallsite = FunctionSamples::getCallSiteIdentifier(DLoc);
        StringRef CalleeName;
        if (Function *Callee = CB->getCalledFunction())
          CalleeName = FunctionSamples::getCanonicalFnName(Callee->getName());
        // Force to overwrite the callee name in case any non-call location was
        // recorded for this key earlier (e.g. a probe at the same location).
        auto R = IRLocations.emplace(IRCallsite, CalleeName);
        R.first->second = CalleeName;
        assert((!FunctionSamples::ProfileIsProbeBased || R.second ||
                R.first->second == CalleeName) &&
               "Overwrite non-call or different callee name location for "
               "pseudo probe callsite");
        // Go through all the callsites on the IR and flag the callsite if the
        // target name is the same as the one in the profile.
        const auto CTM = FS.findCallTargetMapAt(IRCallsite);
        const auto CallsiteFS = FS.findFunctionSamplesMapAt(IRCallsite);
        // Indirect call case.
        if (CalleeName.empty()) {
          // Since indirect call does not have the CalleeName, check
          // conservatively if callsite in the profile is a callsite location.
          // This is to avoid lots of false positives since otherwise all the
          // indirect call samples will be reported as mismatching.
          if ((CTM && !CTM->empty()) || (CallsiteFS && !CallsiteFS->empty()))
            MatchedCallsiteLocs.insert(IRCallsite);
          // Check if the call target name is matched for direct call case.
          if ((CTM && CTM->count(CalleeName)) ||
              (CallsiteFS && CallsiteFS->count(CalleeName)))
            MatchedCallsiteLocs.insert(IRCallsite);
  // Detect profile mismatch for profile staleness metrics report.
  if (ReportProfileStaleness || PersistProfileStaleness) {
    uint64_t FuncMismatchedCallsites = 0;
    uint64_t FuncProfiledCallsites = 0;
    countProfileMismatches(FS, MatchedCallsiteLocs, FuncMismatchedCallsites,
                           FuncProfiledCallsites);
    TotalProfiledCallsites += FuncProfiledCallsites;
    NumMismatchedCallsites += FuncMismatchedCallsites;
    if (FunctionSamples::ProfileIsProbeBased && !IsFuncHashMismatch &&
        FuncMismatchedCallsites)
      dbgs() << "Function checksum is matched but there are "
             << FuncMismatchedCallsites << "/" << FuncProfiledCallsites
             << " mismatched callsites.\n";
  // Only salvage when the checksum actually mismatched — otherwise the
  // original locations are trusted as-is.
  if (IsFuncHashMismatch && SalvageStaleProfile) {
    LLVM_DEBUG(dbgs() << "Run stale profile matching for " << F.getName()
    StringMap<std::set<LineLocation>> CalleeToCallsitesMap;
    populateProfileCallsites(FS, CalleeToCallsitesMap);
    // The matching result will be saved to IRToProfileLocationMap, create a new
    // map for each function.
    auto &IRToProfileLocationMap = getIRToProfileLocationMap(F);
    runStaleProfileMatching(IRLocations, CalleeToCallsitesMap,
                            IRToProfileLocationMap);
// Run the matcher over every profiled function in the module, then report
// and/or persist the aggregated staleness statistics.
void SampleProfileMatcher::runOnModule() {
  if (F.isDeclaration() || !F.hasFnAttribute("use-sample-profile"))
  FunctionSamples *FS = nullptr;
  // Flattened profiles fold inlinee samples back into the outline function,
  // which is what the matcher compares against the IR.
  if (FlattenProfileForMatching)
    FS = getFlattenedSamplesFor(F);
    FS = Reader.getSamplesFor(F);
  runOnFunction(F, *FS);
  if (SalvageStaleProfile)
    distributeIRToProfileLocationMap();
  if (ReportProfileStaleness) {
    if (FunctionSamples::ProfileIsProbeBased) {
      errs() << "(" << NumMismatchedFuncHash << "/" << TotalProfiledFunc << ")"
             << " of functions' profile are invalid and "
             << " (" << MismatchedFuncHashSamples << "/" << TotalFuncHashSamples
             << " of samples are discarded due to function hash mismatch.\n";
    errs() << "(" << NumMismatchedCallsites << "/" << TotalProfiledCallsites
           << " of callsites' profile are invalid and "
           << "(" << MismatchedCallsiteSamples << "/" << TotalCallsiteSamples
           << " of samples are discarded due to callsite location mismatch.\n";
  // Persist the same counters as module-level "llvm.stats" metadata so they
  // survive into the produced IR.
  if (PersistProfileStaleness) {
    LLVMContext &Ctx = M.getContext();
    SmallVector<std::pair<StringRef, uint64_t>> ProfStatsVec;
    if (FunctionSamples::ProfileIsProbeBased) {
      ProfStatsVec.emplace_back("NumMismatchedFuncHash", NumMismatchedFuncHash);
      ProfStatsVec.emplace_back("TotalProfiledFunc", TotalProfiledFunc);
      ProfStatsVec.emplace_back("MismatchedFuncHashSamples",
                                MismatchedFuncHashSamples);
      ProfStatsVec.emplace_back("TotalFuncHashSamples", TotalFuncHashSamples);
    ProfStatsVec.emplace_back("NumMismatchedCallsites", NumMismatchedCallsites);
    ProfStatsVec.emplace_back("TotalProfiledCallsites", TotalProfiledCallsites);
    ProfStatsVec.emplace_back("MismatchedCallsiteSamples",
                              MismatchedCallsiteSamples);
    ProfStatsVec.emplace_back("TotalCallsiteSamples", TotalCallsiteSamples);
    auto *MD = MDB.createLLVMStats(ProfStatsVec);
    auto *NMD = M.getOrInsertNamedMetadata("llvm.stats");
    NMD->addOperand(MD);
2425 void SampleProfileMatcher::distributeIRToProfileLocationMap(
2426 FunctionSamples &FS) {
2427 const auto ProfileMappings = FuncMappings.find(FS.getName());
2428 if (ProfileMappings != FuncMappings.end()) {
2429 FS.setIRToProfileLocationMap(&(ProfileMappings->second));
2432 for (auto &Inlinees : FS.getCallsiteSamples()) {
2433 for (auto FS : Inlinees.second) {
2434 distributeIRToProfileLocationMap(FS.second);
// Use a central place to distribute the matching results. Outlined and inlined
// profiles with the same function name will be set to the same map pointer.
void SampleProfileMatcher::distributeIRToProfileLocationMap() {
  // Walk every top-level profile; the per-FunctionSamples overload recurses
  // into inlinee profiles.
  for (auto &I : Reader.getProfiles()) {
    distributeIRToProfileLocationMap(I.second);
/// Module entry point: publish the profile summary, build the symbol map,
/// run the staleness matcher, then annotate every profiled function in the
/// computed processing order.
/// \returns true if any function was modified.
bool SampleProfileLoader::runOnModule(Module &M, ModuleAnalysisManager *AM,
                                      ProfileSummaryInfo *_PSI,
                                      LazyCallGraph &CG) {
  GUIDToFuncNameMapper Mapper(M, *Reader, GUIDToFuncNameMap);
  if (M.getProfileSummary(/* IsCS */ false) == nullptr) {
    M.setProfileSummary(Reader->getSummary().getMD(M.getContext()),
                        ProfileSummary::PSK_Sample);
  // Compute the total number of samples collected in this profile.
  for (const auto &I : Reader->getProfiles())
    TotalCollectedSamples += I.second.getTotalSamples();
  auto Remapper = Reader->getRemapper();
  // Populate the symbol map.
  for (const auto &N_F : M.getValueSymbolTable()) {
    StringRef OrigName = N_F.getKey();
    Function *F = dyn_cast<Function>(N_F.getValue());
    if (F == nullptr || OrigName.empty())
    SymbolMap[OrigName] = F;
    StringRef NewName = FunctionSamples::getCanonicalFnName(*F);
    if (OrigName != NewName && !NewName.empty()) {
      auto r = SymbolMap.insert(std::make_pair(NewName, F));
      // Failing to insert means there is already an entry in SymbolMap,
      // thus there are multiple functions that are mapped to the same
      // stripped name. In this case of name conflicting, set the value
      // to nullptr to avoid confusion.
        r.first->second = nullptr;
    // Insert the remapped names into SymbolMap.
      if (auto MapName = Remapper->lookUpNameInProfile(OrigName)) {
        if (*MapName != OrigName && !MapName->empty())
          SymbolMap.insert(std::make_pair(*MapName, F));
  assert(SymbolMap.count(StringRef()) == 0 &&
         "No empty StringRef should be added in SymbolMap");
  // Run staleness analysis/salvaging before annotation so salvaged mappings
  // are visible when functions are processed below.
  if (ReportProfileStaleness || PersistProfileStaleness ||
      SalvageStaleProfile) {
    MatchingManager->runOnModule();
  bool retval = false;
  for (auto *F : buildFunctionOrder(M, CG)) {
    assert(!F->isDeclaration());
    clearFunctionData();
    retval |= runOnFunction(*F, AM);
  // Account for cold calls not inlined....
  if (!FunctionSamples::ProfileIsCS)
    for (const std::pair<Function *, NotInlinedProfileInfo> &pair :
      updateProfileCallee(pair.first, pair.second.entryCount);
// Annotate one function from its sample profile: pick a conservative
// initial entry count, locate the function's samples (context-sensitive
// base samples or plain reader samples), and emit branch-weight
// annotations when non-empty samples are found.
// NOTE(review): several original lines are elided in this excerpt
// (closing braces, else branches); code is left byte-identical.
2513 bool SampleProfileLoader::runOnFunction(Function &F, ModuleAnalysisManager *AM) {
2514 LLVM_DEBUG(dbgs() << "\n\nProcessing Function " << F.getName() << "\n");
// Per-function cache of debug-location -> samples; reset for each function.
2515 DILocation2SampleMap.clear();
2516 // By default the entry count is initialized to -1, which will be treated
2517 // conservatively by getEntryCount as the same as unknown (None). This is
2518 // to avoid newly added code to be treated as cold. If we have samples
2519 // this will be overwritten in emitAnnotations.
2520 uint64_t initialEntryCount = -1;
// Symbol-list accuracy only applies when the flag is set AND a profile
// symbol list (PSL) was actually loaded.
2522 ProfAccForSymsInList = ProfileAccurateForSymsInList && PSL;
2523 if (ProfileSampleAccurate || F.hasFnAttribute("profile-sample-accurate")) {
2524 // initialize all the function entry counts to 0. It means all the
2525 // functions without profile will be regarded as cold.
2526 initialEntryCount = 0;
2527 // profile-sample-accurate is a user assertion which has a higher precedence
2528 // than symbol list. When profile-sample-accurate is on, ignore symbol list.
2529 ProfAccForSymsInList = false;
2531 CoverageTracker.setProfAccForSymsInList(ProfAccForSymsInList);
2533 // PSL -- profile symbol list include all the symbols in sampled binary.
2534 // If ProfileAccurateForSymsInList is enabled, PSL is used to treat
2535 // old functions without samples being cold, without having to worry
2536 // about new and hot functions being mistakenly treated as cold.
2537 if (ProfAccForSymsInList) {
2538 // Initialize the entry count to 0 for functions in the list.
2539 if (PSL->contains(F.getName()))
2540 initialEntryCount = 0;
2542 // Function in the symbol list but without sample will be regarded as
2543 // cold. To minimize the potential negative performance impact it could
2544 // have, we want to be a little conservative here saying if a function
2545 // shows up in the profile, no matter as outline function, inline instance
2546 // or call targets, treat the function as not being cold. This will handle
2547 // the cases such as most callsites of a function are inlined in sampled
2548 // binary but not inlined in current build (because of source code drift,
2549 // imprecise debug information, or the callsites are all cold individually
2550 // but not cold accumulatively...), so the outline function showing up as
2551 // cold in sampled binary will actually not be cold after current build.
2552 StringRef CanonName = FunctionSamples::getCanonicalFnName(F);
// Revert to "unknown" (-1) for listed functions that do appear somewhere
// in the profile, so they are not pessimized to cold.
2553 if (NamesInProfile.count(CanonName))
2554 initialEntryCount = -1;
2557 // Initialize entry count when the function has no existing entry
// count; an entry count set by an earlier pass/frontend is kept as-is.
2559 if (!F.getEntryCount())
2560 F.setEntryCount(ProfileCount(initialEntryCount, Function::PCT_Real));
2561 std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
// When a module analysis manager is available, reach the function-level
// ORE through the FunctionAnalysisManager proxy; otherwise an owned ORE
// is built locally. (The if/else around these lines is elided in this
// excerpt — presumably guarded on AM being non-null; verify in full file.)
2564 AM->getResult<FunctionAnalysisManagerModuleProxy>(*F.getParent())
2566 ORE = &FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2568 OwnedORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2569 ORE = OwnedORE.get();
// CS (context-sensitive) profiles keep samples per context; fetch the
// merged base samples for F. Otherwise ask the reader directly.
2572 if (FunctionSamples::ProfileIsCS)
2573 Samples = ContextTracker->getBaseSamplesFor(F);
2575 Samples = Reader->getSamplesFor(F);
// Only annotate when we actually found non-empty samples; the result of
// emitAnnotations reports whether the IR was changed.
2577 if (Samples && !Samples->empty())
2578 return emitAnnotations(F);
2581 SampleProfileLoaderPass::SampleProfileLoaderPass(
2582 std::string File, std::string RemappingFile, ThinOrFullLTOPhase LTOPhase,
2583 IntrusiveRefCntPtr<vfs::FileSystem> FS)
2584 : ProfileFileName(File), ProfileRemappingFileName(RemappingFile),
2585 LTOPhase(LTOPhase), FS(std::move(FS)) {}
// New-pass-manager entry point: wires function-level analysis accessors
// into a SampleProfileLoader, initializes it from the configured (or
// flag-provided) profile files, and runs it over the whole module.
// Returns PreservedAnalyses::none() when the loader changed the module,
// all() otherwise.
// NOTE(review): some lines (lambda terminators, guard conditions) are
// elided in this excerpt; code is left byte-identical.
2587 PreservedAnalyses SampleProfileLoaderPass::run(Module &M,
2588 ModuleAnalysisManager &AM) {
2589 FunctionAnalysisManager &FAM =
2590 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
// Lazy per-function analysis accessors handed to the loader.
2592 auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
2593 return FAM.getResult<AssumptionAnalysis>(F);
2595 auto GetTTI = [&](Function &F) -> TargetTransformInfo & {
2596 return FAM.getResult<TargetIRAnalysis>(F);
2598 auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
2599 return FAM.getResult<TargetLibraryAnalysis>(F);
// Fall back to the real filesystem. (The preceding guard is elided here;
// presumably this runs only when no FS was injected — confirm in full file.)
2603 FS = vfs::getRealFileSystem();
// Pass-level file names take precedence; empty ones fall back to the
// -sample-profile-file / -sample-profile-remapping-file flag values.
2605 SampleProfileLoader SampleLoader(
2606 ProfileFileName.empty() ? SampleProfileFile : ProfileFileName,
2607 ProfileRemappingFileName.empty() ? SampleProfileRemappingFile
2608 : ProfileRemappingFileName,
2609 LTOPhase, FS, GetAssumptionCache, GetTTI, GetTLI);
// If the profile cannot be loaded/initialized, the module is untouched.
2611 if (!SampleLoader.doInitialization(M, &FAM))
2612 return PreservedAnalyses::all();
2614 ProfileSummaryInfo *PSI = &AM.getResult<ProfileSummaryAnalysis>(M);
2615 LazyCallGraph &CG = AM.getResult<LazyCallGraphAnalysis>(M);
2616 if (!SampleLoader.runOnModule(M, &AM, PSI, CG))
2617 return PreservedAnalyses::all();
// The loader annotated/inlined: conservatively invalidate everything.
2619 return PreservedAnalyses::none();