//===-- AMDGPUAnnotateKernelFeaturesPass.cpp ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This pass adds target attributes to functions which use intrinsics
/// which will impact calling convention lowering.
//
//===----------------------------------------------------------------------===//
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
22 #define DEBUG_TYPE "amdgpu-annotate-kernel-features"
28 class AMDGPUAnnotateKernelFeatures : public ModulePass {
30 const TargetMachine *TM;
32 static bool hasAddrSpaceCast(const Function &F, AMDGPUAS AS);
34 void addAttrToCallers(Function *Intrin, StringRef AttrName);
35 bool addAttrsForIntrinsics(Module &M, ArrayRef<StringRef[2]>);
40 AMDGPUAnnotateKernelFeatures(const TargetMachine *TM_ = nullptr) :
41 ModulePass(ID), TM(TM_) {}
42 bool runOnModule(Module &M) override;
43 StringRef getPassName() const override {
44 return "AMDGPU Annotate Kernel Features";
47 void getAnalysisUsage(AnalysisUsage &AU) const override {
49 ModulePass::getAnalysisUsage(AU);
52 static bool visitConstantExpr(const ConstantExpr *CE, AMDGPUAS AS);
53 static bool visitConstantExprsRecursively(
54 const Constant *EntryC,
55 SmallPtrSet<const Constant *, 8> &ConstantExprVisited,
61 char AMDGPUAnnotateKernelFeatures::ID = 0;
63 char &llvm::AMDGPUAnnotateKernelFeaturesID = AMDGPUAnnotateKernelFeatures::ID;
65 INITIALIZE_PASS(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
66 "Add AMDGPU function attributes", false, false)
69 // The queue ptr is only needed when casting to flat, not from it.
70 static bool castRequiresQueuePtr(unsigned SrcAS, const AMDGPUAS &AS) {
71 return SrcAS == AS.LOCAL_ADDRESS || SrcAS == AS.PRIVATE_ADDRESS;
74 static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC,
76 return castRequiresQueuePtr(ASC->getSrcAddressSpace(), AS);
79 bool AMDGPUAnnotateKernelFeatures::visitConstantExpr(const ConstantExpr *CE,
81 if (CE->getOpcode() == Instruction::AddrSpaceCast) {
82 unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
83 return castRequiresQueuePtr(SrcAS, AS);
89 bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
90 const Constant *EntryC,
91 SmallPtrSet<const Constant *, 8> &ConstantExprVisited,
94 if (!ConstantExprVisited.insert(EntryC).second)
97 SmallVector<const Constant *, 16> Stack;
98 Stack.push_back(EntryC);
100 while (!Stack.empty()) {
101 const Constant *C = Stack.pop_back_val();
103 // Check this constant expression.
104 if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
105 if (visitConstantExpr(CE, AS))
109 // Visit all sub-expressions.
110 for (const Use &U : C->operands()) {
111 const auto *OpC = dyn_cast<Constant>(U);
115 if (!ConstantExprVisited.insert(OpC).second)
118 Stack.push_back(OpC);
125 // Return true if an addrspacecast is used that requires the queue ptr.
126 bool AMDGPUAnnotateKernelFeatures::hasAddrSpaceCast(const Function &F,
128 SmallPtrSet<const Constant *, 8> ConstantExprVisited;
130 for (const BasicBlock &BB : F) {
131 for (const Instruction &I : BB) {
132 if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
133 if (castRequiresQueuePtr(ASC, AS))
137 for (const Use &U : I.operands()) {
138 const auto *OpC = dyn_cast<Constant>(U);
142 if (visitConstantExprsRecursively(OpC, ConstantExprVisited, AS))
151 void AMDGPUAnnotateKernelFeatures::addAttrToCallers(Function *Intrin,
152 StringRef AttrName) {
153 SmallPtrSet<Function *, 4> SeenFuncs;
155 for (User *U : Intrin->users()) {
156 // CallInst is the only valid user for an intrinsic.
157 CallInst *CI = cast<CallInst>(U);
159 Function *CallingFunction = CI->getParent()->getParent();
160 if (SeenFuncs.insert(CallingFunction).second)
161 CallingFunction->addFnAttr(AttrName);
165 bool AMDGPUAnnotateKernelFeatures::addAttrsForIntrinsics(
167 ArrayRef<StringRef[2]> IntrinsicToAttr) {
168 bool Changed = false;
170 for (const StringRef *Arr : IntrinsicToAttr) {
171 if (Function *Fn = M.getFunction(Arr[0])) {
172 addAttrToCallers(Fn, Arr[1]);
180 bool AMDGPUAnnotateKernelFeatures::runOnModule(Module &M) {
181 Triple TT(M.getTargetTriple());
182 AS = AMDGPU::getAMDGPUAS(M);
184 static const StringRef IntrinsicToAttr[][2] = {
186 { "llvm.amdgcn.workitem.id.y", "amdgpu-work-item-id-y" },
187 { "llvm.amdgcn.workitem.id.z", "amdgpu-work-item-id-z" },
189 { "llvm.amdgcn.workgroup.id.y", "amdgpu-work-group-id-y" },
190 { "llvm.amdgcn.workgroup.id.z", "amdgpu-work-group-id-z" },
192 { "llvm.r600.read.tgid.y", "amdgpu-work-group-id-y" },
193 { "llvm.r600.read.tgid.z", "amdgpu-work-group-id-z" },
196 { "llvm.r600.read.tidig.y", "amdgpu-work-item-id-y" },
197 { "llvm.r600.read.tidig.z", "amdgpu-work-item-id-z" }
200 static const StringRef HSAIntrinsicToAttr[][2] = {
201 { "llvm.amdgcn.dispatch.ptr", "amdgpu-dispatch-ptr" },
202 { "llvm.amdgcn.queue.ptr", "amdgpu-queue-ptr" },
203 { "llvm.amdgcn.dispatch.id", "amdgpu-dispatch-id" },
204 { "llvm.trap", "amdgpu-queue-ptr" },
205 { "llvm.debugtrap", "amdgpu-queue-ptr" }
208 // TODO: We should not add the attributes if the known compile time workgroup
209 // size is 1 for y/z.
211 // TODO: Intrinsics that require queue ptr.
213 // We do not need to note the x workitem or workgroup id because they are
214 // always initialized.
216 bool Changed = addAttrsForIntrinsics(M, IntrinsicToAttr);
217 if (TT.getOS() == Triple::AMDHSA || TT.getOS() == Triple::Mesa3D) {
218 Changed |= addAttrsForIntrinsics(M, HSAIntrinsicToAttr);
220 for (Function &F : M) {
221 if (F.hasFnAttribute("amdgpu-queue-ptr"))
224 bool HasApertureRegs =
225 TM && TM->getSubtarget<AMDGPUSubtarget>(F).hasApertureRegs();
226 if (!HasApertureRegs && hasAddrSpaceCast(F, AS))
227 F.addFnAttr("amdgpu-queue-ptr");
234 ModulePass *llvm::createAMDGPUAnnotateKernelFeaturesPass(const TargetMachine *TM) {
235 return new AMDGPUAnnotateKernelFeatures(TM);