//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to NVPTX
// targets.
//
//===----------------------------------------------------------------------===//
#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"
using namespace clang;
using namespace CodeGen;
namespace {
/// Identifiers of the NVPTX device runtime entry points emitted by this file.
/// Passed to createNVPTXRuntimeFunction() to obtain the matching declaration.
enum OpenMPRTLFunctionNVPTX {
  /// \brief Call to void __kmpc_kernel_init(kmp_int32 thread_limit);
  OMPRTL_NVPTX__kmpc_kernel_init,
  /// \brief Call to void __kmpc_kernel_deinit();
  OMPRTL_NVPTX__kmpc_kernel_deinit,
};
} // anonymous namespace
32 /// Get the GPU warp size.
33 static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
34 CGBuilderTy &Bld = CGF.Builder;
35 return Bld.CreateCall(
36 llvm::Intrinsic::getDeclaration(
37 &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
38 llvm::None, "nvptx_warp_size");
41 /// Get the id of the current thread on the GPU.
42 static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
43 CGBuilderTy &Bld = CGF.Builder;
44 return Bld.CreateCall(
45 llvm::Intrinsic::getDeclaration(
46 &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
47 llvm::None, "nvptx_tid");
50 /// Get the maximum number of threads in a block of the GPU.
51 static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
52 CGBuilderTy &Bld = CGF.Builder;
53 return Bld.CreateCall(
54 llvm::Intrinsic::getDeclaration(
55 &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
56 llvm::None, "nvptx_num_threads");
59 /// Get barrier to synchronize all threads in a block.
60 static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
61 CGBuilderTy &Bld = CGF.Builder;
62 Bld.CreateCall(llvm::Intrinsic::getDeclaration(
63 &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
66 /// Synchronize all GPU threads in a block.
67 static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }
69 /// Get the value of the thread_limit clause in the teams directive.
70 /// The runtime encodes thread_limit in the launch parameter, always starting
71 /// thread_limit+warpSize threads per team.
72 static llvm::Value *getThreadLimit(CodeGenFunction &CGF) {
73 CGBuilderTy &Bld = CGF.Builder;
74 return Bld.CreateSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
78 /// Get the thread id of the OMP master thread.
79 /// The master thread id is the first thread (lane) of the last warp in the
80 /// GPU block. Warp size is assumed to be some power of 2.
81 /// Thread id is 0 indexed.
82 /// E.g: If NumThreads is 33, master id is 32.
83 /// If NumThreads is 64, master id is 32.
84 /// If NumThreads is 1024, master id is 992.
85 static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
86 CGBuilderTy &Bld = CGF.Builder;
87 llvm::Value *NumThreads = getNVPTXNumThreads(CGF);
89 // We assume that the warp size is a power of 2.
90 llvm::Value *Mask = Bld.CreateSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));
92 return Bld.CreateAnd(Bld.CreateSub(NumThreads, Bld.getInt32(1)),
93 Bld.CreateNot(Mask), "master_tid");
96 CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
98 : WorkerFn(nullptr), CGFI(nullptr) {
99 createWorkerFunction(CGM);
102 void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
103 CodeGenModule &CGM) {
104 // Create an worker function with no arguments.
105 CGFI = &CGM.getTypes().arrangeNullaryFunction();
107 WorkerFn = llvm::Function::Create(
108 CGM.getTypes().GetFunctionType(*CGFI), llvm::GlobalValue::InternalLinkage,
109 /* placeholder */ "_worker", &CGM.getModule());
110 CGM.SetInternalFunctionAttributes(/*D=*/nullptr, WorkerFn, *CGFI);
113 void CGOpenMPRuntimeNVPTX::emitGenericKernel(const OMPExecutableDirective &D,
114 StringRef ParentName,
115 llvm::Function *&OutlinedFn,
116 llvm::Constant *&OutlinedFnID,
118 const RegionCodeGenTy &CodeGen) {
119 EntryFunctionState EST;
120 WorkerFunctionState WST(CGM);
122 // Emit target region as a standalone region.
123 class NVPTXPrePostActionTy : public PrePostActionTy {
124 CGOpenMPRuntimeNVPTX &RT;
125 CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
126 CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;
129 NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
130 CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
131 CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
132 : RT(RT), EST(EST), WST(WST) {}
133 void Enter(CodeGenFunction &CGF) override {
134 RT.emitGenericEntryHeader(CGF, EST, WST);
136 void Exit(CodeGenFunction &CGF) override {
137 RT.emitGenericEntryFooter(CGF, EST);
139 } Action(*this, EST, WST);
140 CodeGen.setAction(Action);
141 emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
142 IsOffloadEntry, CodeGen);
144 // Create the worker function
145 emitWorkerFunction(WST);
147 // Now change the name of the worker function to correspond to this target
148 // region's entry function.
149 WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
152 // Setup NVPTX threads for master-worker OpenMP scheme.
153 void CGOpenMPRuntimeNVPTX::emitGenericEntryHeader(CodeGenFunction &CGF,
154 EntryFunctionState &EST,
155 WorkerFunctionState &WST) {
156 CGBuilderTy &Bld = CGF.Builder;
158 llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
159 llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
160 llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
161 EST.ExitBB = CGF.createBasicBlock(".exit");
164 Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
165 Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
167 CGF.EmitBlock(WorkerBB);
168 CGF.EmitCallOrInvoke(WST.WorkerFn, llvm::None);
169 CGF.EmitBranch(EST.ExitBB);
171 CGF.EmitBlock(MasterCheckBB);
173 Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
174 Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
176 CGF.EmitBlock(MasterBB);
177 // First action in sequential region:
178 // Initialize the state of the OpenMP runtime library on the GPU.
179 llvm::Value *Args[] = {getThreadLimit(CGF)};
181 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
184 void CGOpenMPRuntimeNVPTX::emitGenericEntryFooter(CodeGenFunction &CGF,
185 EntryFunctionState &EST) {
187 EST.ExitBB = CGF.createBasicBlock(".exit");
189 llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
190 CGF.EmitBranch(TerminateBB);
192 CGF.EmitBlock(TerminateBB);
193 // Signal termination condition.
195 createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), None);
196 // Barrier to terminate worker threads.
198 // Master thread jumps to exit point.
199 CGF.EmitBranch(EST.ExitBB);
201 CGF.EmitBlock(EST.ExitBB);
202 EST.ExitBB = nullptr;
205 void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
206 auto &Ctx = CGM.getContext();
208 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
209 CGF.disableDebugInfo();
210 CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, *WST.CGFI, {});
211 emitWorkerLoop(CGF, WST);
212 CGF.FinishFunction();
215 void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
216 WorkerFunctionState &WST) {
218 // The workers enter this loop and wait for parallel work from the master.
219 // When the master encounters a parallel region it sets up the work + variable
220 // arguments, and wakes up the workers. The workers first check to see if
221 // they are required for the parallel region, i.e., within the # of requested
222 // parallel threads. The activated workers load the variable arguments and
223 // execute the parallel work.
226 CGBuilderTy &Bld = CGF.Builder;
228 llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
229 llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
230 llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
231 llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
232 llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
233 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
235 CGF.EmitBranch(AwaitBB);
237 // Workers wait for work from master.
238 CGF.EmitBlock(AwaitBB);
239 // Wait for parallel work
243 CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
245 CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
246 CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
247 CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
249 // TODO: Call into runtime to get parallel work.
251 // On termination condition (workid == 0), exit loop.
252 llvm::Value *ShouldTerminate =
253 Bld.CreateIsNull(Bld.CreateLoad(WorkFn), "should_terminate");
254 Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
256 // Activate requested workers.
257 CGF.EmitBlock(SelectWorkersBB);
258 llvm::Value *IsActive =
259 Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
260 Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
262 // Signal start of parallel region.
263 CGF.EmitBlock(ExecuteBB);
264 // TODO: Add parallel work.
266 // Signal end of parallel region.
267 CGF.EmitBlock(TerminateBB);
268 CGF.EmitBranch(BarrierBB);
270 // All active and inactive workers wait at a barrier after parallel region.
271 CGF.EmitBlock(BarrierBB);
272 // Barrier after parallel region.
274 CGF.EmitBranch(AwaitBB);
276 // Exit target region.
277 CGF.EmitBlock(ExitBB);
280 /// \brief Returns specified OpenMP runtime function for the current OpenMP
281 /// implementation. Specialized for the NVPTX device.
282 /// \param Function OpenMP runtime function.
283 /// \return Specified function.
285 CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
286 llvm::Constant *RTLFn = nullptr;
287 switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
288 case OMPRTL_NVPTX__kmpc_kernel_init: {
289 // Build void __kmpc_kernel_init(kmp_int32 thread_limit);
290 llvm::Type *TypeParams[] = {CGM.Int32Ty};
291 llvm::FunctionType *FnTy =
292 llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
293 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
296 case OMPRTL_NVPTX__kmpc_kernel_deinit: {
297 // Build void __kmpc_kernel_deinit();
298 llvm::FunctionType *FnTy =
299 llvm::FunctionType::get(CGM.VoidTy, {}, /*isVarArg*/ false);
300 RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
307 void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
308 llvm::Constant *Addr,
309 uint64_t Size, int32_t) {
310 auto *F = dyn_cast<llvm::Function>(Addr);
311 // TODO: Add support for global variables on the device after declare target
315 llvm::Module *M = F->getParent();
316 llvm::LLVMContext &Ctx = M->getContext();
318 // Get "nvvm.annotations" metadata node
319 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
321 llvm::Metadata *MDVals[] = {
322 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, "kernel"),
323 llvm::ConstantAsMetadata::get(
324 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
325 // Append metadata to nvvm.annotations
326 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
329 void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
330 const OMPExecutableDirective &D, StringRef ParentName,
331 llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
332 bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
333 if (!IsOffloadEntry) // Nothing to do.
336 assert(!ParentName.empty() && "Invalid target region parent name!");
338 emitGenericKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
342 CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
343 : CGOpenMPRuntime(CGM) {
344 if (!CGM.getLangOpts().OpenMPIsDevice)
345 llvm_unreachable("OpenMP NVPTX can only handle device code.");
348 void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
349 const Expr *NumTeams,
350 const Expr *ThreadLimit,
351 SourceLocation Loc) {}
353 llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOrTeamsOutlinedFunction(
354 const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
355 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {
357 llvm::Function *OutlinedFun = nullptr;
358 if (isa<OMPTeamsDirective>(D)) {
359 llvm::Value *OutlinedFunVal =
360 CGOpenMPRuntime::emitParallelOrTeamsOutlinedFunction(
361 D, ThreadIDVar, InnermostKind, CodeGen);
362 OutlinedFun = cast<llvm::Function>(OutlinedFunVal);
363 OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
364 OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
366 llvm_unreachable("parallel directive is not yet supported for nvptx "
372 void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
373 const OMPExecutableDirective &D,
375 llvm::Value *OutlinedFn,
376 ArrayRef<llvm::Value *> CapturedVars) {
377 if (!CGF.HaveInsertPoint())
381 CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
382 /*Name*/ ".zero.addr");
383 CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
384 llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
385 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
386 OutlinedFnArgs.push_back(ZeroAddr.getPointer());
387 OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
388 CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs);