//===-- AMDGPUAlwaysInlinePass.cpp - Inline Always Inline Functions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass marks all internal functions as always_inline and creates
/// duplicates of all other functions and marks the duplicates as always_inline.
//
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Cloning.h"
27 static cl::opt<bool> StressCalls(
28 "amdgpu-stress-function-calls",
30 cl::desc("Force all functions to be noinline"),
33 class AMDGPUAlwaysInline : public ModulePass {
36 void recursivelyVisitUsers(GlobalValue &GV,
37 SmallPtrSetImpl<Function *> &FuncsToAlwaysInline);
41 AMDGPUAlwaysInline(bool GlobalOpt = false) :
42 ModulePass(ID), GlobalOpt(GlobalOpt) { }
43 bool runOnModule(Module &M) override;
45 void getAnalysisUsage(AnalysisUsage &AU) const override {
50 } // End anonymous namespace
52 INITIALIZE_PASS(AMDGPUAlwaysInline, "amdgpu-always-inline",
53 "AMDGPU Inline All Functions", false, false)
55 char AMDGPUAlwaysInline::ID = 0;
57 void AMDGPUAlwaysInline::recursivelyVisitUsers(
59 SmallPtrSetImpl<Function *> &FuncsToAlwaysInline) {
60 SmallVector<User *, 16> Stack;
62 SmallPtrSet<const Value *, 8> Visited;
64 for (User *U : GV.users())
67 while (!Stack.empty()) {
68 User *U = Stack.pop_back_val();
69 if (!Visited.insert(U).second)
72 if (Instruction *I = dyn_cast<Instruction>(U)) {
73 Function *F = I->getParent()->getParent();
74 if (!AMDGPU::isEntryFunctionCC(F->getCallingConv())) {
75 FuncsToAlwaysInline.insert(F);
79 // No need to look at further users, but we do need to inline any callers.
83 for (User *UU : U->users())
88 bool AMDGPUAlwaysInline::runOnModule(Module &M) {
89 AMDGPUAS AMDGPUAS = AMDGPU::getAMDGPUAS(M);
91 std::vector<GlobalAlias*> AliasesToRemove;
93 SmallPtrSet<Function *, 8> FuncsToAlwaysInline;
94 SmallPtrSet<Function *, 8> FuncsToNoInline;
96 for (GlobalAlias &A : M.aliases()) {
97 if (Function* F = dyn_cast<Function>(A.getAliasee())) {
98 A.replaceAllUsesWith(F);
99 AliasesToRemove.push_back(&A);
102 // FIXME: If the aliasee isn't a function, it's some kind of constant expr
103 // cast that won't be inlined through.
107 for (GlobalAlias* A : AliasesToRemove) {
108 A->eraseFromParent();
112 // Always force inlining of any function that uses an LDS global address. This
113 // is something of a workaround because we don't have a way of supporting LDS
114 // objects defined in functions. LDS is always allocated by a kernel, and it
115 // is difficult to manage LDS usage if a function may be used by multiple
118 // OpenCL doesn't allow declaring LDS in non-kernels, so in practice this
119 // should only appear when IPO passes manages to move LDs defined in a kernel
120 // into a single user function.
122 for (GlobalVariable &GV : M.globals()) {
123 // TODO: Region address
124 unsigned AS = GV.getType()->getAddressSpace();
125 if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS.REGION_ADDRESS)
128 recursivelyVisitUsers(GV, FuncsToAlwaysInline);
131 if (!AMDGPUTargetMachine::EnableFunctionCalls || StressCalls) {
133 = StressCalls ? Attribute::AlwaysInline : Attribute::NoInline;
135 for (Function &F : M) {
136 if (!F.isDeclaration() && !F.use_empty() &&
137 !F.hasFnAttribute(IncompatAttr)) {
139 if (!FuncsToAlwaysInline.count(&F))
140 FuncsToNoInline.insert(&F);
142 FuncsToAlwaysInline.insert(&F);
147 for (Function *F : FuncsToAlwaysInline)
148 F->addFnAttr(Attribute::AlwaysInline);
150 for (Function *F : FuncsToNoInline)
151 F->addFnAttr(Attribute::NoInline);
153 return !FuncsToAlwaysInline.empty() || !FuncsToNoInline.empty();
156 ModulePass *llvm::createAMDGPUAlwaysInlinePass(bool GlobalOpt) {
157 return new AMDGPUAlwaysInline(GlobalOpt);